Liu Song’s Projects


~/Projects/whisper.cpp

git clone https://code.lsong.org/whisper.cpp

Commit

Commit
cb70b07db5a5a1ee41aa6ed4859e35908fc2d120
Author
Georgi Gerganov <[email protected]>
Date
2022-11-26 10:05:37 +0200
Diffstat
 examples/livestream.sh | 69 +++++++++++++++++++++++++++++++
 examples/stream.wasm/emscripten.cpp | 2 
 examples/talk.wasm/emscripten.cpp | 2 

livestream.sh : simple tool to transcribe audio livestreams (#185)


diff --git a/examples/livestream.sh b/examples/livestream.sh
new file mode 100755
index 0000000000000000000000000000000000000000..18893a3ffcee7a0ef10378606b373b85ef637223
--- /dev/null
+++ b/examples/livestream.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+# Transcribe audio livestream by feeding ffmpeg output to whisper.cpp at regular intervals
+# Idea by @semiformal-net
+# ref: https://github.com/ggerganov/whisper.cpp/issues/185
+#
+# TODO:
+# - Currently, there is a gap between sequential chunks, so some of the words are dropped. Need to figure out a
+#   way to produce a continuous stream of audio chunks.
+#
+
+url="http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/nonuk/sbr_low/ak/bbc_world_service.m3u8"
+step_ms=10000
+model="base.en"
+
+if [ -z "$1" ]; then
+    echo "Usage: $0 stream_url [step_ms] [model]"
+    echo ""
+    echo "  Example:"
+    echo "    $0 $url $step_ms $model"
+    echo ""
+    echo "No url specified, using default: $url"
+else
+    url="$1"
+fi
+
+if [ -n "$2" ]; then
+    step_ms="$2"
+fi
+
+if [ -n "$3" ]; then
+    model="$3"
+fi
+
+# Whisper models
+models=( "tiny.en" "tiny" "base.en" "base" "small.en" "small" "medium.en" "medium" "large" )
+
+# list available models
+function list_models {
+    printf "\n"
+    printf "  Available models:"
+    for m in "${models[@]}"; do  # local name 'm' avoids clobbering the global $model
+        printf " %s" "$m"        # value as printf argument, not format string
+    done
+    printf "\n\n"
+}
+
+if [[ ! " ${models[@]} " =~ " ${model} " ]]; then
+    printf "Invalid model: %s\n" "$model"
+    list_models
+
+    exit 1
+fi
+
+running=1
+
+trap "running=0" SIGINT SIGTERM
+
+printf "[+] Transcribing stream with model '%s', step_ms %s (press Ctrl+C to stop):\n\n" "$model" "$step_ms"
+
+while [ $running -eq 1 ]; do
+    ffmpeg -y -re -probesize 32 -i "$url" -ar 16000 -ac 1 -c:a pcm_s16le -t ${step_ms}ms /tmp/whisper-live0.wav > /dev/null 2> /tmp/whisper-live.err
+    if [ $? -ne 0 ]; then
+        printf "Error: ffmpeg failed to capture audio stream\n"
+        exit 1
+    fi
+    mv /tmp/whisper-live0.wav /tmp/whisper-live.wav
+    ./main -t 8 -m "./models/ggml-${model}.bin" -f /tmp/whisper-live.wav --no-timestamps -otxt 2> /tmp/whispererr | tail -n 1 &
+done




diff --git a/examples/stream.wasm/emscripten.cpp b/examples/stream.wasm/emscripten.cpp
index f8e3e27d80aa2db8c6b1f40fcce2c3774cc148fc..b75eee365aaab66004be9f9476f957901474b767 100644
--- a/examples/stream.wasm/emscripten.cpp
+++ b/examples/stream.wasm/emscripten.cpp
@@ -51,7 +51,7 @@     wparams.audio_ctx        = 768; // partial encoder context for better performance
 
     wparams.language         = "en";
 
-    printf("stream: using %d threads\n", N_THREAD);
+    printf("stream: using %d threads\n", wparams.n_threads);
 
     std::vector<float> pcmf32;
 




diff --git a/examples/talk.wasm/emscripten.cpp b/examples/talk.wasm/emscripten.cpp
index 501c459287f00c720e170f840a9947b3b1881e61..c82f4696d8ebd156ca5f74f8ca8a797ea624d99b 100644
--- a/examples/talk.wasm/emscripten.cpp
+++ b/examples/talk.wasm/emscripten.cpp
@@ -68,7 +68,7 @@     wparams.language         = "en";
 
     g_gpt2 = gpt2_init("gpt-2.bin");
 
-    printf("talk: using %d threads\n", N_THREAD);
+    printf("talk: using %d threads\n", wparams.n_threads);
 
     std::vector<float> pcmf32;