forked from extern/whisper.cpp
Compare commits
52 Commits
Commit SHA1s (52):

4f074fb7a8, 09e9068007, fa9d43181f, bb6b54a03d, b597c5a779, a3fb6c507f, 59fdcd19c8, 478289a4b3,
5e94129cb2, 72af0f5697, af005d573f, ad1389003d, f420de1322, d176160f6f, ca21f7ab16, 373043cabe,
fb4d0d470f, 0d229163bb, f254e78737, a94897bcde, 2407ae8ef0, b623ca43b1, 69e6e4644a, 09d7d2b68e,
0336161b7d, 459753342d, 9764782bd9, 3b010f9bed, 113fcec513, cfc06bf8df, 2bfe0ebc0f, 4dd7119deb,
ab1916fc59, a1c1583cc7, d012b5c7e4, b2083c5d02, f3ee4a9673, c306a7fd89, b2fc4c7010, 291980369c,
86ef64a855, 3b1960520a, 2bee2650c6, beb9512be3, 47737b2e82, b992f3709e, 60337f5306, 02c7516c57,
411ea9b833, 11f61cecd6, b5ddb16ec7, ae16c21e9c
Changed files

Go bindings test workflow

@@ -1,13 +1,18 @@
-name: Bindings Tests
+name: Bindings Tests (Go)
 on:
   push:
     paths:
       - bindings/go/**
+      - whisper.h
+  pull_request:
+    paths:
+      - bindings/go/**
+      - whisper.h

 jobs:
   ubuntu-latest:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/setup-go@v3
         with:
           go-version: '^1.19'
.github/workflows/bindings-ruby.yml (new file, 22 lines, vendored)

name: Bindings Tests (Ruby)
on:
  push:
    paths:
      - bindings/ruby/**
      - whisper.h
  pull_request:
    paths:
      - bindings/ruby/**
      - whisper.h

jobs:
  ubuntu-latest:
    runs-on: ubuntu-latest
    steps:
      - uses: ruby/setup-ruby@v1
        with:
          ruby-version: '3.0'
      - uses: actions/checkout@v1
      - run: |
          cd bindings/ruby/ext
          ruby extconf.rb && make
.github/workflows/build.yml (vendored, 426 changed lines)

@@ -1,267 +1,267 @@
 name: CI
-on: [push]
+on: [push, pull_request]

 jobs:
   ubuntu-latest:
     runs-on: ubuntu-latest

     steps:
       - name: Clone
         uses: actions/checkout@v1

       - name: Dependencies
         run: |
           sudo apt-get update
           sudo apt-get install build-essential
           sudo apt-get install libsdl2-dev

       - name: Build
         run: |
           make
           make stream

   macOS-latest:
     runs-on: macOS-latest

     steps:
       - name: Clone
         uses: actions/checkout@v1

       - name: Dependencies
         run: |
           brew update
           brew install sdl2

       - name: Build
         run: |
           make
           make stream

   ubuntu-latest-gcc:
     runs-on: ubuntu-latest

     strategy:
       matrix:
         build: [Debug, Release]

     steps:
       - name: Clone
         uses: actions/checkout@v1

       - name: Dependencies
         run: |
           sudo apt-get update
           sudo apt-get install build-essential
           sudo apt-get install cmake
           sudo apt-get install libsdl2-dev

       - name: Configure
         run: cmake . -DWHISPER_SUPPORT_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }}

       - name: Build
         run: |
           make
           ctest -L gh --output-on-failure

   ubuntu-latest-clang:
     runs-on: ubuntu-latest

     strategy:
       matrix:
         build: [Debug, Release]

     steps:
       - name: Clone
         uses: actions/checkout@v1

       - name: Dependencies
         run: |
           sudo apt-get update
           sudo apt-get install build-essential
           sudo apt-get install cmake
           sudo apt-get install libsdl2-dev

       - name: Configure
         run: cmake . -DWHISPER_SUPPORT_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }} -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang

       - name: Build
         run: |
           make
           ctest -L gh --output-on-failure

   ubuntu-latest-gcc-sanitized:
     runs-on: ubuntu-latest

     strategy:
       matrix:
         sanitizer: [ADDRESS, THREAD, UNDEFINED]

     steps:
       - name: Clone
         uses: actions/checkout@v1

       - name: Dependencies
         run: |
           sudo apt-get update
           sudo apt-get install build-essential
           sudo apt-get install cmake

       - name: Configure
         run: cmake . -DCMAKE_BUILD_TYPE=Debug -DWHISPER_SANITIZE_${{ matrix.sanitizer }}=ON

       - name: Build
         run: |
           make
           ctest -L gh --output-on-failure

   windows:
     runs-on: windows-latest

     strategy:
       matrix:
         build: [Release]
         arch: [Win32, x64]
         sdl2: [ON]
         include:
           - arch: Win32
             s2arc: x86
           - arch: x64
             s2arc: x64
           - sdl2: ON
             s2ver: 2.26.0

     steps:
       - name: Clone
         uses: actions/checkout@v1

       - name: Add msbuild to PATH
         uses: microsoft/setup-msbuild@v1

       - name: Fetch SDL2 and set SDL2_DIR
         if: matrix.sdl2 == 'ON'
         run: |
           C:/msys64/usr/bin/wget.exe -qO sdl2.zip https://github.com/libsdl-org/SDL/releases/download/release-${{ matrix.s2ver }}/SDL2-devel-${{ matrix.s2ver }}-VC.zip
           7z x sdl2.zip
           echo "SDL2_DIR=$env:GITHUB_WORKSPACE/SDL2-${{ matrix.s2ver }}/cmake" >> $env:GITHUB_ENV

       - name: Configure
         run: >
           cmake -S . -B ./build -A ${{ matrix.arch }}
           -DCMAKE_BUILD_TYPE=${{ matrix.build }}
           -DWHISPER_SUPPORT_SDL2=${{ matrix.sdl2 }}

       - name: Build
         run: |
           cd ./build
           msbuild ALL_BUILD.vcxproj -t:build -p:configuration=${{ matrix.build }} -p:platform=${{ matrix.arch }}

       - name: Copy SDL2.dll
         if: matrix.sdl2 == 'ON'
         run: copy "$env:SDL2_DIR/../lib/${{ matrix.s2arc }}/SDL2.dll" build/bin/${{ matrix.build }}

       - name: Upload binaries
         if: matrix.sdl2 == 'ON'
         uses: actions/upload-artifact@v1
         with:
           name: whisper-bin-${{ matrix.arch }}
           path: build/bin/${{ matrix.build }}

   windows-blas:
     runs-on: windows-latest

     strategy:
       matrix:
         build: [Release]
         arch: [Win32, x64]
         blas: [ON]
         sdl2: [ON]
         include:
           - arch: Win32
             obzip: https://github.com/xianyi/OpenBLAS/releases/download/v0.3.21/OpenBLAS-0.3.21-x86.zip
             s2arc: x86
           - arch: x64
             obzip: https://github.com/xianyi/OpenBLAS/releases/download/v0.3.21/OpenBLAS-0.3.21-x64.zip
             s2arc: x64
           - sdl2: ON
             s2ver: 2.26.0

     steps:
       - name: Clone
         uses: actions/checkout@v1

       - name: Add msbuild to PATH
         uses: microsoft/setup-msbuild@v1

       - name: Fetch OpenBLAS
         if: matrix.blas == 'ON'
         run: |
           C:/msys64/usr/bin/wget.exe -qO blas.zip ${{ matrix.obzip }}
           7z x blas.zip -oblas -y
           copy blas/include/cblas.h .
           copy blas/include/openblas_config.h .
           echo "blasdir=$env:GITHUB_WORKSPACE/blas" >> $env:GITHUB_ENV

       - name: Fetch SDL2 and set SDL2_DIR
         if: matrix.sdl2 == 'ON'
         run: |
           C:/msys64/usr/bin/wget.exe -qO sdl2.zip https://github.com/libsdl-org/SDL/releases/download/release-${{ matrix.s2ver }}/SDL2-devel-${{ matrix.s2ver }}-VC.zip
           7z x sdl2.zip
           echo "SDL2_DIR=$env:GITHUB_WORKSPACE/SDL2-${{ matrix.s2ver }}/cmake" >> $env:GITHUB_ENV

       - name: Configure
         run: >
           cmake -S . -B ./build -A ${{ matrix.arch }}
           -DCMAKE_BUILD_TYPE=${{ matrix.build }}
           -DWHISPER_SUPPORT_OPENBLAS=${{ matrix.blas }}
           -DCMAKE_LIBRARY_PATH="$env:blasdir/lib"
           -DWHISPER_SUPPORT_SDL2=${{ matrix.sdl2 }}

       - name: Build
         run: |
           cd ./build
           msbuild ALL_BUILD.vcxproj -t:build -p:configuration=${{ matrix.build }} -p:platform=${{ matrix.arch }}

       - name: Copy libopenblas.dll
         if: matrix.blas == 'ON'
         run: copy "$env:blasdir/bin/libopenblas.dll" build/bin/${{ matrix.build }}

       - name: Copy SDL2.dll
         if: matrix.sdl2 == 'ON'
         run: copy "$env:SDL2_DIR/../lib/${{ matrix.s2arc }}/SDL2.dll" build/bin/${{ matrix.build }}

       - name: Upload binaries
         if: matrix.blas == 'ON' && matrix.sdl2 == 'ON'
         uses: actions/upload-artifact@v1
         with:
           name: whisper-blas-bin-${{ matrix.arch }}
           path: build/bin/${{ matrix.build }}

   emscripten:
     runs-on: ubuntu-latest

     strategy:
       matrix:
         build: [Release]

     steps:
       - name: Clone
         uses: actions/checkout@v1

       - name: Dependencies
         run: |
           wget -q https://github.com/emscripten-core/emsdk/archive/master.tar.gz
           tar -xvf master.tar.gz
           emsdk-master/emsdk update
           emsdk-master/emsdk install latest
           emsdk-master/emsdk activate latest

       - name: Configure
         run: echo "tmp"

       - name: Build
         run: |
           pushd emsdk-master
           source ./emsdk_env.sh
           popd
           emcmake cmake . -DCMAKE_BUILD_TYPE=${{ matrix.build }}
           make
.github/workflows/examples.yml (new file, 48 lines, vendored)

name: Examples Tests
on:
  push:
    paths:
      - examples/addon.node/**
      - whisper.h
  pull_request:
    paths:
      - examples/addon.node/**
      - whisper.h

jobs:
  addon_node-ubuntu-latest:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        node-version: [ 16.x, 18.x ]
    steps:
      - name: Clone
        uses: actions/checkout@v1

      - name: Dependencies
        run: |
          sudo apt-get update
          sudo apt-get install build-essential
          sudo apt-get install cmake
          sudo apt-get install libsdl2-dev

      - name: Use Node.js ${{ matrix.node-version }}
        uses: actions/setup-node@v1
        with:
          node-version: ${{ matrix.node-version }}
          cache: 'npm'

      - name: Install package.json dependencies
        working-directory: ./examples/addon.node
        run: npm install

      - name: Compile addon.node
        run: npx cmake-js compile -T whisper-addon -B Release

      - name: Download test model
        run: |
          bash ./models/download-ggml-model.sh base.en
      - name: Test
        run: |
          cd examples/addon.node
          npm run test
.gitignore (vendored, 2 changed lines)

@@ -1,4 +1,5 @@
 *.o
+*.a
 .cache/
 .vs/
 .vscode/
@@ -9,6 +10,7 @@ build-em/
 build-debug/
 build-release/
 build-static/
+build-no-accel/
 build-sanitize-addr/
 build-sanitize-thread/

CMakeLists.txt

@@ -1,6 +1,6 @@
 cmake_minimum_required (VERSION 3.0)

-project(whisper.cpp VERSION 1.1.1)
+project(whisper.cpp VERSION 1.2.1)

 # Add path to modules
 list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")
@@ -226,10 +226,13 @@ target_compile_definitions(${TARGET} PUBLIC
     ${WHISPER_EXTRA_FLAGS}
     )

+set_target_properties(${TARGET} PROPERTIES PUBLIC_HEADER "whisper.h")
+
 install(TARGETS ${TARGET}
     LIBRARY DESTINATION lib
     ARCHIVE DESTINATION lib/static
     RUNTIME DESTINATION bin
+    PUBLIC_HEADER DESTINATION include
     )

 #
@@ -242,7 +245,7 @@ add_subdirectory(bindings)
 # programs, examples and tests
 #

-if (WHISPER_BUILD_TESTS)
+if (WHISPER_BUILD_TESTS AND NOT CMAKE_JS_VERSION)
     enable_testing()
     add_subdirectory(tests)
 endif ()
Makefile (31 changed lines)

@@ -30,8 +30,8 @@ endif
 # Compile flags
 #

-CFLAGS   = -I.              -O3 -std=c11   -fPIC
-CXXFLAGS = -I. -I./examples -O3 -std=c++11 -fPIC
+CFLAGS   = -I.              -O3 -DNDEBUG -std=c11   -fPIC
+CXXFLAGS = -I. -I./examples -O3 -DNDEBUG -std=c++11 -fPIC
 LDFLAGS  =

 # OS specific
@@ -115,11 +115,15 @@ endif
 ifeq ($(UNAME_M),amd64)
     CFLAGS += -mavx -mavx2 -mfma -mf16c
 endif
-ifeq ($(UNAME_M),ppc64le)
+ifneq ($(filter ppc64%,$(UNAME_M)),)
     POWER9_M := $(shell grep "POWER9" /proc/cpuinfo)
     ifneq (,$(findstring POWER9,$(POWER9_M)))
         CFLAGS += -mpower9-vector
     endif
+    # Require c++23's std::byteswap for big-endian support.
+    ifeq ($(UNAME_M),ppc64)
+        CXXFLAGS += -std=c++23 -DGGML_BIG_ENDIAN
+    endif
 endif
 ifndef WHISPER_NO_ACCELERATE
     # Mac M1 - include Accelerate framework
@@ -137,6 +141,8 @@ ifdef WHISPER_GPROF
     CXXFLAGS += -pg
 endif
 ifneq ($(filter aarch64%,$(UNAME_M)),)
+    CFLAGS   += -mcpu=native
+    CXXFLAGS += -mcpu=native
 endif
 ifneq ($(filter armv6%,$(UNAME_M)),)
     # Raspberry Pi 1, 2, 3
@@ -193,18 +199,21 @@ clean:

 CC_SDL=`sdl2-config --cflags --libs`

-main: examples/main/main.cpp ggml.o whisper.o
-	$(CXX) $(CXXFLAGS) examples/main/main.cpp ggml.o whisper.o -o main $(LDFLAGS)
+SRC_COMMON = examples/common.cpp
+SRC_COMMON_SDL = examples/common-sdl.cpp
+
+main: examples/main/main.cpp $(SRC_COMMON) ggml.o whisper.o
+	$(CXX) $(CXXFLAGS) examples/main/main.cpp $(SRC_COMMON) ggml.o whisper.o -o main $(LDFLAGS)
 	./main -h

-stream: examples/stream/stream.cpp ggml.o whisper.o
-	$(CXX) $(CXXFLAGS) examples/stream/stream.cpp ggml.o whisper.o -o stream $(CC_SDL) $(LDFLAGS)
+stream: examples/stream/stream.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o whisper.o
+	$(CXX) $(CXXFLAGS) examples/stream/stream.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o whisper.o -o stream $(CC_SDL) $(LDFLAGS)

-command: examples/command/command.cpp ggml.o whisper.o
-	$(CXX) $(CXXFLAGS) examples/command/command.cpp ggml.o whisper.o -o command $(CC_SDL) $(LDFLAGS)
+command: examples/command/command.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o whisper.o
+	$(CXX) $(CXXFLAGS) examples/command/command.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o whisper.o -o command $(CC_SDL) $(LDFLAGS)

-talk: examples/talk/talk.cpp examples/talk/gpt-2.cpp ggml.o whisper.o
-	$(CXX) $(CXXFLAGS) examples/talk/talk.cpp examples/talk/gpt-2.cpp ggml.o whisper.o -o talk $(CC_SDL) $(LDFLAGS)
+talk: examples/talk/talk.cpp examples/talk/gpt-2.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o whisper.o
+	$(CXX) $(CXXFLAGS) examples/talk/talk.cpp examples/talk/gpt-2.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o whisper.o -o talk $(CC_SDL) $(LDFLAGS)

 bench: examples/bench/bench.cpp ggml.o whisper.o
 	$(CXX) $(CXXFLAGS) examples/bench/bench.cpp ggml.o whisper.o -o bench $(LDFLAGS)
README.md (222 changed lines)

@@ -4,7 +4,7 @@
 [](https://opensource.org/licenses/MIT)
 [](https://www.npmjs.com/package/whisper.cpp/)

-Stable: [v1.1.1](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.1.1) / [Roadmap | F.A.Q.](https://github.com/ggerganov/whisper.cpp/discussions/126)
+Stable: [v1.2.1](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.2.1) / [Roadmap | F.A.Q.](https://github.com/ggerganov/whisper.cpp/discussions/126)

 High-performance inference of [OpenAI's Whisper](https://github.com/openai/whisper) automatic speech recognition (ASR) model:

@@ -13,7 +13,7 @@ High-performance inference of [OpenAI's Whisper](https://github.com/openai/whisp
 - AVX intrinsics support for x86 architectures
 - VSX intrinsics support for POWER architectures
 - Mixed F16 / F32 precision
-- Low memory usage (Flash Attention + Flash Forward)
+- Low memory usage (Flash Attention)
 - Zero memory allocations at runtime
 - Runs on the CPU
 - [C-style API](https://github.com/ggerganov/whisper.cpp/blob/master/whisper.h)

@@ -89,35 +89,37 @@ c++ -I. -I./examples -O3 -std=c++11 -pthread examples/main/main.cpp whisper.o gg
 usage: ./main [options] file0.wav file1.wav ...

 options:
 -h, --help [default] show this help message and exit
 -t N, --threads N [4 ] number of threads to use during computation
 -p N, --processors N [1 ] number of processors to use during computation
 -ot N, --offset-t N [0 ] time offset in milliseconds
 -on N, --offset-n N [0 ] segment index offset
 -d N, --duration N [0 ] duration of audio to process in milliseconds
 -mc N, --max-context N [-1 ] maximum number of text context tokens to store
 -ml N, --max-len N [0 ] maximum segment length in characters
 -bo N, --best-of N [5 ] number of best candidates to keep
 -bs N, --beam-size N [-1 ] beam size for beam search
 -wt N, --word-thold N [0.01 ] word timestamp probability threshold
 -et N, --entropy-thold N [2.40 ] entropy threshold for decoder fail
 -lpt N, --logprob-thold N [-1.00 ] log probability threshold for decoder fail
 -su, --speed-up [false ] speed up audio by x2 (reduced accuracy)
 -tr, --translate [false ] translate from source language to english
 -di, --diarize [false ] stereo audio diarization
+-nf, --no-fallback [false ] do not use temperature fallback while decoding
 -otxt, --output-txt [false ] output result in a text file
 -ovtt, --output-vtt [false ] output result in a vtt file
 -osrt, --output-srt [false ] output result in a srt file
 -owts, --output-words [false ] output script for generating karaoke video
 -ocsv, --output-csv [false ] output result in a CSV file
+-of FNAME, --output-file FNAME [ ] output file path (without file extension)
 -ps, --print-special [false ] print special tokens
 -pc, --print-colors [false ] print colors
 -pp, --print-progress [false ] print progress
 -nt, --no-timestamps [true ] do not print timestamps
 -l LANG, --language LANG [en ] spoken language ('auto' for auto-detect)
 --prompt PROMPT [ ] initial prompt
 -m FNAME, --model FNAME [models/ggml-base.en.bin] model path
 -f FNAME, --file FNAME [ ] input WAV file path


 bash ./models/download-ggml-model.sh base.en

@@ -137,7 +139,8 @@ Running base.en on all samples in ./samples ...
 [+] Running base.en on samples/jfk.wav ... (run 'ffplay samples/jfk.wav' to listen)
 ----------------------------------------------

-whisper_model_load: loading model from 'models/ggml-base.en.bin'
+whisper_init_from_file: loading model from 'models/ggml-base.en.bin'
+whisper_model_load: loading model
 whisper_model_load: n_vocab = 51864
 whisper_model_load: n_audio_ctx = 1500
 whisper_model_load: n_audio_state = 512

@@ -150,13 +153,14 @@ whisper_model_load: n_text_layer = 6
 whisper_model_load: n_mels = 80
 whisper_model_load: f16 = 1
 whisper_model_load: type = 2
+whisper_model_load: mem required = 215.00 MB (+ 6.00 MB per decoder)
+whisper_model_load: kv self size = 5.25 MB
+whisper_model_load: kv cross size = 17.58 MB
 whisper_model_load: adding 1607 extra tokens
-whisper_model_load: mem_required = 506.00 MB
-whisper_model_load: ggml ctx size = 140.60 MB
-whisper_model_load: memory size = 22.83 MB
+whisper_model_load: model ctx = 140.60 MB
 whisper_model_load: model size = 140.54 MB

-system_info: n_threads = 4 / 10 | AVX = 0 | AVX2 = 0 | AVX512 = 0 | NEON = 1 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 |
+system_info: n_threads = 4 / 10 | AVX = 0 | AVX2 = 0 | AVX512 = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 |

 main: processing 'samples/jfk.wav' (176000 samples, 11.0 sec), 4 threads, 1 processors, lang = en, task = transcribe, timestamps = 1 ...

@@ -164,12 +168,13 @@ main: processing 'samples/jfk.wav' (176000 samples, 11.0 sec), 4 threads, 1 proc
 [00:00:00.000 --> 00:00:11.000] And so my fellow Americans, ask not what your country can do for you, ask what you can do for your country.


-whisper_print_timings: load time = 105.91 ms
-whisper_print_timings: mel time = 24.62 ms
-whisper_print_timings: sample time = 3.63 ms
-whisper_print_timings: encode time = 324.71 ms / 54.12 ms per layer
-whisper_print_timings: decode time = 83.58 ms / 13.93 ms per layer
-whisper_print_timings: total time = 542.81 ms
+whisper_print_timings: fallbacks = 0 p / 0 h
+whisper_print_timings: load time = 113.81 ms
+whisper_print_timings: mel time = 15.40 ms
+whisper_print_timings: sample time = 11.58 ms / 27 runs ( 0.43 ms per run)
+whisper_print_timings: encode time = 266.60 ms / 1 runs ( 266.60 ms per run)
+whisper_print_timings: decode time = 66.11 ms / 27 runs ( 2.45 ms per run)
+whisper_print_timings: total time = 476.31 ms
 ```

 The command downloads the `base.en` model converted to custom `ggml` format and runs the inference on all `.wav` samples in the folder `samples`.

@@ -212,11 +217,11 @@ make large
 | Model | Disk | Mem | SHA |
 | --- | --- | --- | --- |
-| tiny | 75 MB | ~390 MB | `bd577a113a864445d4c299885e0cb97d4ba92b5f` |
-| base | 142 MB | ~500 MB | `465707469ff3a37a2b9b8d8f89f2f99de7299dac` |
-| small | 466 MB | ~1.0 GB | `55356645c2b361a969dfd0ef2c5a50d530afd8d5` |
-| medium | 1.5 GB | ~2.6 GB | `fd9727b6e1217c2f614f9b698455c4ffd82463b4` |
-| large | 2.9 GB | ~4.7 GB | `0f4c8e34f21cf1a914c59d8b3ce882345ad349d6` |
+| tiny | 75 MB | ~125 MB | `bd577a113a864445d4c299885e0cb97d4ba92b5f` |
+| base | 142 MB | ~210 MB | `465707469ff3a37a2b9b8d8f89f2f99de7299dac` |
+| small | 466 MB | ~600 MB | `55356645c2b361a969dfd0ef2c5a50d530afd8d5` |
+| medium | 1.5 GB | ~1.7 GB | `fd9727b6e1217c2f614f9b698455c4ffd82463b4` |
+| large | 2.9 GB | ~3.3 GB | `0f4c8e34f21cf1a914c59d8b3ce882345ad349d6` |

 ## Limitations

@@ -234,7 +239,8 @@ in about half a minute on a MacBook M1 Pro, using `medium.en` model:
 ```java
 $ ./main -m models/ggml-medium.en.bin -f samples/gb1.wav -t 8

-whisper_model_load: loading model from 'models/ggml-medium.en.bin'
+whisper_init_from_file: loading model from 'models/ggml-medium.en.bin'
+whisper_model_load: loading model
 whisper_model_load: n_vocab = 51864
 whisper_model_load: n_audio_ctx = 1500
 whisper_model_load: n_audio_state = 1024

@@ -247,55 +253,60 @@ whisper_model_load: n_text_layer = 24
 whisper_model_load: n_mels = 80
 whisper_model_load: f16 = 1
 whisper_model_load: type = 4
-whisper_model_load: mem_required = 2610.00 MB
+whisper_model_load: mem required = 1720.00 MB (+ 43.00 MB per decoder)
+whisper_model_load: kv self size = 42.00 MB
+whisper_model_load: kv cross size = 140.62 MB
 whisper_model_load: adding 1607 extra tokens
-whisper_model_load: ggml ctx size = 1644.97 MB
-whisper_model_load: memory size = 182.62 MB
+whisper_model_load: model ctx = 1462.35 MB
 whisper_model_load: model size = 1462.12 MB

-main: processing 'samples/gb1.wav' (3179750 samples, 198.7 sec), 8 threads, lang = en, task = transcribe, timestamps = 1 ...
+system_info: n_threads = 8 / 10 | AVX = 0 | AVX2 = 0 | AVX512 = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 |
+
+main: processing 'samples/gb1.wav' (3179750 samples, 198.7 sec), 8 threads, 1 processors, lang = en, task = transcribe, timestamps = 1 ...

-[00:00.000 --> 00:08.000] My fellow Americans, this day has brought terrible news and great sadness to our country.
-[00:08.000 --> 00:17.000] At nine o'clock this morning, Mission Control in Houston lost contact with our Space Shuttle Columbia.
-[00:17.000 --> 00:23.000] A short time later, debris was seen falling from the skies above Texas.
-[00:23.000 --> 00:29.000] The Columbia's lost. There are no survivors.
-[00:29.000 --> 00:32.000] On board was a crew of seven.
-[00:32.000 --> 00:39.000] Colonel Rick Husband, Lieutenant Colonel Michael Anderson, Commander Laurel Clark,
-[00:39.000 --> 00:48.000] Captain David Brown, Commander William McCool, Dr. Kultna Shavla, and Ilan Ramon,
-[00:48.000 --> 00:52.000] a colonel in the Israeli Air Force.
-[00:52.000 --> 00:58.000] These men and women assumed great risk in the service to all humanity.
-[00:58.000 --> 01:03.000] In an age when space flight has come to seem almost routine,
-[01:03.000 --> 01:07.000] it is easy to overlook the dangers of travel by rocket
-[01:07.000 --> 01:12.000] and the difficulties of navigating the fierce outer atmosphere of the Earth.
-[01:12.000 --> 01:18.000] These astronauts knew the dangers, and they faced them willingly,
-[01:18.000 --> 01:23.000] knowing they had a high and noble purpose in life.
-[01:23.000 --> 01:31.000] Because of their courage and daring and idealism, we will miss them all the more.
-[01:31.000 --> 01:36.000] All Americans today are thinking as well of the families of these men and women
-[01:36.000 --> 01:40.000] who have been given this sudden shock and grief.
-[01:40.000 --> 01:45.000] You're not alone. Our entire nation grieves with you,
-[01:45.000 --> 01:52.000] and those you love will always have the respect and gratitude of this country.
-[01:52.000 --> 01:56.000] The cause in which they died will continue.
-[01:56.000 --> 02:04.000] Mankind is led into the darkness beyond our world by the inspiration of discovery
-[02:04.000 --> 02:11.000] and the longing to understand. Our journey into space will go on.
-[02:11.000 --> 02:16.000] In the skies today, we saw destruction and tragedy.
-[02:16.000 --> 02:22.000] Yet farther than we can see, there is comfort and hope.
-[02:22.000 --> 02:29.000] In the words of the prophet Isaiah, "Lift your eyes and look to the heavens
-[02:29.000 --> 02:35.000] who created all these. He who brings out the starry hosts one by one
-[02:35.000 --> 02:39.000] and calls them each by name."
-[02:39.000 --> 02:46.000] Because of His great power and mighty strength, not one of them is missing.
-[02:46.000 --> 02:55.000] The same Creator who names the stars also knows the names of the seven souls we mourn today.
-[02:55.000 --> 03:01.000] The crew of the shuttle Columbia did not return safely to earth,
-[03:01.000 --> 03:05.000] yet we can pray that all are safely home.
-[03:05.000 --> 03:13.000] May God bless the grieving families, and may God continue to bless America.
-[03:13.000 --> 03:41.000] Audio
+[00:00:00.000 --> 00:00:08.000] My fellow Americans, this day has brought terrible news and great sadness to our country.
+[00:00:08.000 --> 00:00:17.000] At nine o'clock this morning, Mission Control in Houston lost contact with our Space Shuttle Columbia.
+[00:00:17.000 --> 00:00:23.000] A short time later, debris was seen falling from the skies above Texas.
+[00:00:23.000 --> 00:00:29.000] The Columbia's lost. There are no survivors.
+[00:00:29.000 --> 00:00:32.000] On board was a crew of seven.
+[00:00:32.000 --> 00:00:39.000] Colonel Rick Husband, Lieutenant Colonel Michael Anderson, Commander Laurel Clark,
+[00:00:39.000 --> 00:00:48.000] Captain David Brown, Commander William McCool, Dr. Kultna Shavla, and Ilan Ramon,
+[00:00:48.000 --> 00:00:52.000] a colonel in the Israeli Air Force.
+[00:00:52.000 --> 00:00:58.000] These men and women assumed great risk in the service to all humanity.
+[00:00:58.000 --> 00:01:03.000] In an age when space flight has come to seem almost routine,
+[00:01:03.000 --> 00:01:07.000] it is easy to overlook the dangers of travel by rocket
+[00:01:07.000 --> 00:01:12.000] and the difficulties of navigating the fierce outer atmosphere of the Earth.
+[00:01:12.000 --> 00:01:18.000] These astronauts knew the dangers, and they faced them willingly,
+[00:01:18.000 --> 00:01:23.000] knowing they had a high and noble purpose in life.
+[00:01:23.000 --> 00:01:31.000] Because of their courage and daring and idealism, we will miss them all the more.
+[00:01:31.000 --> 00:01:36.000] All Americans today are thinking as well of the families of these men and women
+[00:01:36.000 --> 00:01:40.000] who have been given this sudden shock and grief.
+[00:01:40.000 --> 00:01:45.000] You're not alone. Our entire nation grieves with you,
+[00:01:45.000 --> 00:01:52.000] and those you love will always have the respect and gratitude of this country.
+[00:01:52.000 --> 00:01:56.000] The cause in which they died will continue.
+[00:01:56.000 --> 00:02:04.000] Mankind is led into the darkness beyond our world by the inspiration of discovery
+[00:02:04.000 --> 00:02:11.000] and the longing to understand. Our journey into space will go on.
+[00:02:11.000 --> 00:02:16.000] In the skies today, we saw destruction and tragedy.
+[00:02:16.000 --> 00:02:22.000] Yet farther than we can see, there is comfort and hope.
+[00:02:22.000 --> 00:02:29.000] In the words of the prophet Isaiah, "Lift your eyes and look to the heavens
+[00:02:29.000 --> 00:02:35.000] who created all these. He who brings out the starry hosts one by one
+[00:02:35.000 --> 00:02:39.000] and calls them each by name."
+[00:02:39.000 --> 00:02:46.000] Because of His great power and mighty strength, not one of them is missing.
+[00:02:46.000 --> 00:02:55.000] The same Creator who names the stars also knows the names of the seven souls we mourn today.
+[00:02:55.000 --> 00:03:01.000] The crew of the shuttle Columbia did not return safely to earth,
+[00:03:01.000 --> 00:03:05.000] yet we can pray that all are safely home.
+[00:03:05.000 --> 00:03:13.000] May God bless the grieving families, and may God continue to bless America.
+[00:03:13.000 --> 00:03:19.000] [Silence]


-whisper_print_timings: load time = 575.92 ms
-whisper_print_timings: mel time = 230.60 ms
-whisper_print_timings: sample time = 73.19 ms
-whisper_print_timings: encode time = 19552.61 ms / 814.69 ms per layer
-whisper_print_timings: decode time = 13249.96 ms / 552.08 ms per layer
-whisper_print_timings: total time = 33686.27 ms
+whisper_print_timings: fallbacks = 1 p / 0 h
+whisper_print_timings: load time = 569.03 ms
+whisper_print_timings: mel time = 146.85 ms
+whisper_print_timings: sample time = 238.66 ms / 553 runs ( 0.43 ms per run)
+whisper_print_timings: encode time = 18665.10 ms / 9 runs ( 2073.90 ms per run)
+whisper_print_timings: decode time = 13090.93 ms / 549 runs ( 23.85 ms per run)
+whisper_print_timings: total time = 32733.52 ms
 ```
 </details>

@@ -321,14 +332,14 @@ to highlight words with high or low confidence:
 ## Controlling the length of the generated text segments (experimental)

 For example, to limit the line length to a maximum of 16 characters, simply add `-ml 16`:

 ```java
 ./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 16

 whisper_model_load: loading model from './models/ggml-base.en.bin'
 ...
 system_info: n_threads = 4 / 10 | AVX2 = 0 | AVX512 = 0 | NEON = 1 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 |

 main: processing './samples/jfk.wav' (176000 samples, 11.0 sec), 4 threads, 1 processors, lang = en, task = transcribe, timestamps = 1 ...

@@ -352,7 +363,7 @@ The `--max-len` argument can be used to obtain word-level timestamps. Simply use
 whisper_model_load: loading model from './models/ggml-base.en.bin'
 ...
 system_info: n_threads = 4 / 10 | AVX2 = 0 | AVX512 = 0 | NEON = 1 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 |

 main: processing './samples/jfk.wav' (176000 samples, 11.0 sec), 4 threads, 1 processors, lang = en, task = transcribe, timestamps = 1 ...

@@ -422,6 +433,19 @@ https://user-images.githubusercontent.com/1991296/199337538-b7b0c7a3-2753-4a88-a
 ---

+## Video comparison of different models
+
+Use the [extra/bench-wts.sh](https://github.com/ggerganov/whisper.cpp/blob/master/extra/bench-wts.sh) script to generate a video in the following format:
+
+```java
+./extra/bench-wts.sh samples/jfk.wav
+ffplay ./samples/jfk.wav.all.mp4
+```
+
+https://user-images.githubusercontent.com/1991296/223206245-2d36d903-cf8e-4f09-8c3b-eb9f9c39d6fc.mp4
+
+---
+
 ## Benchmarks

 In order to have an objective comparison of the performance of the inference across different system configurations,

@@ -453,8 +477,14 @@ in [models](models).
 - [X] Rust: [tazz4843/whisper-rs](https://github.com/tazz4843/whisper-rs) | [#310](https://github.com/ggerganov/whisper.cpp/discussions/310)
 - [X] Javascript: [bindings/javascript](bindings/javascript) | [#309](https://github.com/ggerganov/whisper.cpp/discussions/309)
 - [X] Go: [bindings/go](bindings/go) | [#312](https://github.com/ggerganov/whisper.cpp/discussions/312)
+- [X] Ruby: [bindings/ruby](bindings/ruby) | [#507](https://github.com/ggerganov/whisper.cpp/discussions/507)
 - [X] Objective-C / Swift: [ggerganov/whisper.spm](https://github.com/ggerganov/whisper.spm) | [#313](https://github.com/ggerganov/whisper.cpp/discussions/313)
-- [ ] Python: soon | [WIP](https://github.com/ggerganov/whisper.cpp/issues/9)
+- [X] .NET: | [#422](https://github.com/ggerganov/whisper.cpp/discussions/422)
+  - [sandrohanea/whisper.net](https://github.com/sandrohanea/whisper.net)
+  - [NickDarvey/whisper](https://github.com/NickDarvey/whisper)
+- [X] Python: | [#9](https://github.com/ggerganov/whisper.cpp/issues/9)
+  - [stlukey/whispercpp.py](https://github.com/stlukey/whispercpp.py) (Cython)
+  - [aarnphm/whispercpp](https://github.com/aarnphm/whispercpp) (Pybind11)

 ## Examples
Go bindings example CLI — Process()

@@ -25,6 +25,8 @@ func Process(model whisper.Model, path string, flags *Flags) error {
         return err
     }

+    fmt.Printf("\n%s\n", context.SystemInfo())
+
     // Open the file
     fmt.Fprintf(flags.Output(), "Loading %q\n", path)
     fh, err := os.Open(path)
@@ -64,10 +66,13 @@ func Process(model whisper.Model, path string, flags *Flags) error {

     // Process the data
     fmt.Fprintf(flags.Output(), "  ...processing %q\n", path)
+    context.ResetTimings()
     if err := context.Process(data, cb); err != nil {
         return err
     }

+    context.PrintTimings()
+
     // Print out the results
     switch {
     case flags.GetOut() == "srt":
Go bindings — Params

@@ -49,6 +49,10 @@ func (p *Params) SetSpeedup(v bool) {

 // Set language id
 func (p *Params) SetLanguage(lang int) error {
+    if lang == -1 {
+        p.language = nil
+        return nil
+    }
     str := C.whisper_lang_str(C.int(lang))
     if str == nil {
         return ErrInvalidLanguage
@@ -66,6 +70,11 @@ func (p *Params) Language() int {
     return int(C.whisper_lang_id(p.language))
 }

+// Threads available
+func (p *Params) Threads() int {
+    return int(p.n_threads)
+}
+
 // Set number of threads to use
 func (p *Params) SetThreads(threads int) {
     p.n_threads = C.int(threads)
Go bindings — Context implementation

@@ -1,7 +1,9 @@
 package whisper

 import (
+    "fmt"
     "io"
+    "runtime"
     "strings"
     "time"

@@ -44,7 +46,10 @@ func (context *context) SetLanguage(lang string) error {
     if !context.model.IsMultilingual() {
         return ErrModelNotMultilingual
     }
-    if id := context.model.ctx.Whisper_lang_id(lang); id < 0 {
+
+    if lang == "auto" {
+        context.params.SetLanguage(-1)
+    } else if id := context.model.ctx.Whisper_lang_id(lang); id < 0 {
         return ErrUnsupportedLanguage
     } else if err := context.params.SetLanguage(id); err != nil {
         return err
@@ -59,6 +64,10 @@ func (context *context) IsMultilingual() bool {

 // Get language
 func (context *context) Language() string {
+    id := context.params.Language()
+    if id == -1 {
+        return "auto"
+    }
     return whisper.Whisper_lang_str(context.params.Language())
 }

@@ -107,6 +116,36 @@ func (context *context) SetMaxTokensPerSegment(n uint) {
     context.params.SetMaxTokensPerSegment(int(n))
 }

+// ResetTimings resets the mode timings. Should be called before processing
+func (context *context) ResetTimings() {
+    context.model.ctx.Whisper_reset_timings()
+}
+
+// PrintTimings prints the model timings to stdout.
+func (context *context) PrintTimings() {
+    context.model.ctx.Whisper_print_timings()
+}
+
+// SystemInfo returns the system information
+func (context *context) SystemInfo() string {
+    return fmt.Sprintf("system_info: n_threads = %d / %d | %s\n",
+        context.params.Threads(),
+        runtime.NumCPU(),
+        whisper.Whisper_print_system_info(),
+    )
+}
+
+// Use mel data at offset_ms to try and auto-detect the spoken language
+// Make sure to call whisper_pcm_to_mel() or whisper_set_mel() first.
+// Returns the probabilities of all languages.
+func (context *context) WhisperLangAutoDetect(offset_ms int, n_threads int) ([]float32, error) {
+    langProbs, err := context.model.ctx.Whisper_lang_auto_detect(offset_ms, n_threads)
+    if err != nil {
+        return nil, err
+    }
+    return langProbs, nil
+}
+
 // Process new sample data and return any errors
 func (context *context) Process(data []float32, cb SegmentCallback) error {
     if context.model.ctx == nil {
Go bindings — Context interface

@@ -29,7 +29,7 @@ type Model interface {

 // Context is the speach recognition context.
 type Context interface {
-    SetLanguage(string) error // Set the language to use for speech recognition.
+    SetLanguage(string) error // Set the language to use for speech recognition, use "auto" for auto detect language.
     SetTranslate(bool)        // Set translate flag
     IsMultilingual() bool     // Return true if the model is multilingual.
     Language() string         // Get language
@@ -60,6 +60,12 @@ type Context interface {
     IsNOT(Token) bool          // Test for "No timestamps" token
     IsLANG(Token, string) bool // Test for token associated with a specific language
     IsText(Token) bool         // Test for text token
+
+    // Timings
+    PrintTimings()
+    ResetTimings()
+
+    SystemInfo() string
 }

 // Segment is the text result of a speech recognition.
Go bindings — default context parameters (NewContext)

@@ -94,6 +94,7 @@ func (model *model) NewContext() (Context, error) {
     params.SetPrintRealtime(false)
     params.SetPrintTimestamps(false)
     params.SetThreads(runtime.NumCPU())
+    params.SetNoContext(true)

     // Return new context
     return newContext(model, params)
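Taken together, the Go binding changes above expose language auto-detection ("auto"), timing helpers (ResetTimings/PrintTimings), and a SystemInfo() string on the public Context interface. The sketch below shows how these new methods might be used; it is illustrative only, and it assumes the existing import path github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper, the existing Model constructor/Close method, an already-downloaded multilingual ggml model file, and placeholder 16 kHz mono PCM input — none of which are part of this changeset.

```go
package main

import (
	"fmt"

	// Assumed import path of the Go bindings; adjust to your checkout.
	whisper "github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
)

func main() {
	// Load a ggml model (path is an assumption for this example).
	// Note: SetLanguage("auto") requires a multilingual model, otherwise
	// the bindings return ErrModelNotMultilingual (see the diff above).
	model, err := whisper.New("models/ggml-base.bin")
	if err != nil {
		panic(err)
	}
	defer model.Close()

	ctx, err := model.NewContext()
	if err != nil {
		panic(err)
	}

	// New in this changeset: "auto" maps to language id -1 (auto-detect).
	if err := ctx.SetLanguage("auto"); err != nil {
		panic(err)
	}

	// New in this changeset: system information string, as printed by the example CLI.
	fmt.Println(ctx.SystemInfo())

	// Placeholder input: one second of silence at 16 kHz, mono, float32.
	samples := make([]float32, 16000)

	ctx.ResetTimings() // new: reset timings before processing
	if err := ctx.Process(samples, nil); err != nil {
		panic(err)
	}
	ctx.PrintTimings() // new: print per-stage timings to stdout
}
```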
|
@@ -20,7 +20,7 @@ extern bool callEncoderBegin(void* user_data);
// Text segment callback
// Called on every newly generated text segment
// Use the whisper_full_...() functions to obtain the text segments
static void whisper_new_segment_cb(struct whisper_context* ctx, int n_new, void* user_data) {
static void whisper_new_segment_cb(struct whisper_context* ctx, struct whisper_state* state, int n_new, void* user_data) {
    if(user_data != NULL && ctx != NULL) {
        callNewSegment(user_data, n_new);
    }

@@ -29,7 +29,7 @@ static void whisper_new_segment_cb(struct whisper_context* ctx, int n_new, void*
// Encoder begin callback
// If not NULL, called before the encoder starts
// If it returns false, the computation is aborted
static bool whisper_encoder_begin_cb(struct whisper_context* ctx, void* user_data) {
static bool whisper_encoder_begin_cb(struct whisper_context* ctx, struct whisper_state* state, void* user_data) {
    if(user_data != NULL && ctx != NULL) {
        return callEncoderBegin(user_data);
    }
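The hunks above only change the callback signatures (an upstream `whisper_state *` parameter was added); how the callbacks are wired into `whisper_full_params` is unchanged. A hedged sketch of that registration, using the parameter names shown in this diff:

```cpp
#include "whisper.h"
#include <cstdio>

static void on_new_segment(struct whisper_context * ctx, struct whisper_state * /*state*/, int n_new, void * /*user_data*/) {
    // print only the segments produced by the latest decoder step
    const int n_segments = whisper_full_n_segments(ctx);
    for (int i = n_segments - n_new; i < n_segments; ++i) {
        printf("%s", whisper_full_get_segment_text(ctx, i));
    }
}

static bool on_encoder_begin(struct whisper_context * /*ctx*/, struct whisper_state * /*state*/, void * user_data) {
    // returning false aborts the computation before the encoder runs
    return !*(const bool *) user_data;
}

static void setup_callbacks(struct whisper_full_params & wparams, bool * abort_flag) {
    wparams.new_segment_callback             = on_new_segment;
    wparams.new_segment_callback_user_data   = nullptr;
    wparams.encoder_begin_callback           = on_encoder_begin;
    wparams.encoder_begin_callback_user_data = abort_flag;
}
```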
Submodule bindings/ios updated: 9653b42eb4...92d4c5c9a0
@@ -1,6 +1,6 @@
{
  "name": "whisper.cpp",
  "version": "1.1.1",
  "version": "1.2.1",
  "description": "Whisper speech recognition",
  "main": "whisper.js",
  "scripts": {

File diff suppressed because one or more lines are too long
7
bindings/ruby/ext/.gitignore
vendored
Normal file
@@ -0,0 +1,7 @@
Makefile
ggml.c
ggml.h
whisper.bundle
whisper.cpp
whisper.h
dr_wav.h
21
bindings/ruby/ext/extconf.rb
Normal file
@@ -0,0 +1,21 @@
require 'mkmf'
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','whisper.cpp')} .")
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','whisper.h')} .")
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml.h')} .")
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml.c')} .")
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','examples','dr_wav.h')} .")


# need to use c++ compiler flags
$CXXFLAGS << ' -std=c++11'

# Set to true when building binary gems
if enable_config('static-stdlib', false)
  $LDFLAGS << ' -static-libgcc -static-libstdc++'
end

if enable_config('march-tune-native', false)
  $CFLAGS << ' -march=native -mtune=native'
  $CXXFLAGS << ' -march=native -mtune=native'
end

create_makefile('whisper')
426
bindings/ruby/ext/ruby_whisper.cpp
Normal file
@@ -0,0 +1,426 @@
#include <ruby.h>
#include "ruby_whisper.h"
#define DR_WAV_IMPLEMENTATION
#include "dr_wav.h"
#include <cmath>
#include <fstream>
#include <cstdio>
#include <string>
#include <thread>
#include <vector>

#ifdef __cplusplus
extern "C" {
#endif

#define BOOL_PARAMS_SETTER(self, prop, value) \
  ruby_whisper_params *rwp; \
  Data_Get_Struct(self, ruby_whisper_params, rwp); \
  if (value == Qfalse || value == Qnil) { \
    rwp->params.prop = false; \
  } else { \
    rwp->params.prop = true; \
  } \
  return value; \

#define BOOL_PARAMS_GETTER(self, prop) \
  ruby_whisper_params *rwp; \
  Data_Get_Struct(self, ruby_whisper_params, rwp); \
  if (rwp->params.prop) { \
    return Qtrue; \
  } else { \
    return Qfalse; \
  }

VALUE mWhisper;
VALUE cContext;
VALUE cParams;

static void ruby_whisper_free(ruby_whisper *rw) {
  if (rw->context) {
    whisper_free(rw->context);
    rw->context = NULL;
  }
}
static void ruby_whisper_params_free(ruby_whisper_params *rwp) {
}

void rb_whisper_mark(ruby_whisper *rw) {
  // call rb_gc_mark on any ruby references in rw
}

void rb_whisper_free(ruby_whisper *rw) {
  ruby_whisper_free(rw);
  free(rw);
}

void rb_whisper_params_mark(ruby_whisper_params *rwp) {
}

void rb_whisper_params_free(ruby_whisper_params *rwp) {
  ruby_whisper_params_free(rwp);
  free(rwp);
}

static VALUE ruby_whisper_allocate(VALUE klass) {
  ruby_whisper *rw;
  rw = ALLOC(ruby_whisper);
  rw->context = NULL;
  return Data_Wrap_Struct(klass, rb_whisper_mark, rb_whisper_free, rw);
}

static VALUE ruby_whisper_params_allocate(VALUE klass) {
  ruby_whisper_params *rwp;
  rwp = ALLOC(ruby_whisper_params);
  rwp->params = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
  return Data_Wrap_Struct(klass, rb_whisper_params_mark, rb_whisper_params_free, rwp);
}

static VALUE ruby_whisper_initialize(int argc, VALUE *argv, VALUE self) {
  ruby_whisper *rw;
  VALUE whisper_model_file_path;

  // TODO: we can support init from buffer here too maybe another ruby object to expose
  rb_scan_args(argc, argv, "01", &whisper_model_file_path);
  Data_Get_Struct(self, ruby_whisper, rw);

  if (!rb_respond_to(whisper_model_file_path, rb_intern("to_s"))) {
    rb_raise(rb_eRuntimeError, "Expected file path to model to initialize Whisper::Context");
  }
  rw->context = whisper_init_from_file(StringValueCStr(whisper_model_file_path));
  if (rw->context == nullptr) {
    rb_raise(rb_eRuntimeError, "error: failed to initialize whisper context");
  }
  return self;
}

/*
 * transcribe a single file
 * can emit to a block results
 *
 **/
static VALUE ruby_whisper_transcribe(int argc, VALUE *argv, VALUE self) {
  ruby_whisper *rw;
  ruby_whisper_params *rwp;
  VALUE wave_file_path, blk, params;

  rb_scan_args(argc, argv, "02&", &wave_file_path, &params, &blk);
  Data_Get_Struct(self, ruby_whisper, rw);
  Data_Get_Struct(params, ruby_whisper_params, rwp);

  if (!rb_respond_to(wave_file_path, rb_intern("to_s"))) {
    rb_raise(rb_eRuntimeError, "Expected file path to wave file");
  }

  std::string fname_inp = StringValueCStr(wave_file_path);

  std::vector<float> pcmf32; // mono-channel F32 PCM
  std::vector<std::vector<float>> pcmf32s; // stereo-channel F32 PCM

  // WAV input - this is directly from main.cpp example
  {
    drwav wav;
    std::vector<uint8_t> wav_data; // used for pipe input from stdin

    if (fname_inp == "-") {
      {
        uint8_t buf[1024];
        while (true) {
          const size_t n = fread(buf, 1, sizeof(buf), stdin);
          if (n == 0) {
            break;
          }
          wav_data.insert(wav_data.end(), buf, buf + n);
        }
      }

      if (drwav_init_memory(&wav, wav_data.data(), wav_data.size(), nullptr) == false) {
        fprintf(stderr, "error: failed to open WAV file from stdin\n");
        return self;
      }

      fprintf(stderr, "%s: read %zu bytes from stdin\n", __func__, wav_data.size());
    } else if (drwav_init_file(&wav, fname_inp.c_str(), nullptr) == false) {
      fprintf(stderr, "error: failed to open '%s' as WAV file\n", fname_inp.c_str());
      return self;
    }

    if (wav.channels != 1 && wav.channels != 2) {
      fprintf(stderr, "WAV file '%s' must be mono or stereo\n", fname_inp.c_str());
      return self;
    }

    if (rwp->diarize && wav.channels != 2 && rwp->params.print_timestamps == false) {
      fprintf(stderr, "WAV file '%s' must be stereo for diarization and timestamps have to be enabled\n", fname_inp.c_str());
      return self;
    }

    if (wav.sampleRate != WHISPER_SAMPLE_RATE) {
      fprintf(stderr, "WAV file '%s' must be %i kHz\n", fname_inp.c_str(), WHISPER_SAMPLE_RATE/1000);
      return self;
    }

    if (wav.bitsPerSample != 16) {
      fprintf(stderr, "WAV file '%s' must be 16-bit\n", fname_inp.c_str());
      return self;
    }

    const uint64_t n = wav_data.empty() ? wav.totalPCMFrameCount : wav_data.size()/(wav.channels*wav.bitsPerSample/8);

    std::vector<int16_t> pcm16;
    pcm16.resize(n*wav.channels);
    drwav_read_pcm_frames_s16(&wav, n, pcm16.data());
    drwav_uninit(&wav);

    // convert to mono, float
    pcmf32.resize(n);
    if (wav.channels == 1) {
      for (uint64_t i = 0; i < n; i++) {
        pcmf32[i] = float(pcm16[i])/32768.0f;
      }
    } else {
      for (uint64_t i = 0; i < n; i++) {
        pcmf32[i] = float(pcm16[2*i] + pcm16[2*i + 1])/65536.0f;
      }
    }

    if (rwp->diarize) {
      // convert to stereo, float
      pcmf32s.resize(2);

      pcmf32s[0].resize(n);
      pcmf32s[1].resize(n);
      for (uint64_t i = 0; i < n; i++) {
        pcmf32s[0][i] = float(pcm16[2*i])/32768.0f;
        pcmf32s[1][i] = float(pcm16[2*i + 1])/32768.0f;
      }
    }
  }
  {
    static bool is_aborted = false; // NOTE: this should be atomic to avoid data race

    rwp->params.encoder_begin_callback = [](struct whisper_context * /*ctx*/, struct whisper_state * /*state*/, void * user_data) {
      bool is_aborted = *(bool*)user_data;
      return !is_aborted;
    };
    rwp->params.encoder_begin_callback_user_data = &is_aborted;
  }

  if (whisper_full_parallel(rw->context, rwp->params, pcmf32.data(), pcmf32.size(), 1) != 0) {
    fprintf(stderr, "failed to process audio\n");
    return self;
  }
  const int n_segments = whisper_full_n_segments(rw->context);
  VALUE output = rb_str_new2("");
  for (int i = 0; i < n_segments; ++i) {
    const char * text = whisper_full_get_segment_text(rw->context, i);
    output = rb_str_concat(output, rb_str_new2(text));
  }
  VALUE idCall = rb_intern("call");
  if (blk != Qnil) {
    rb_funcall(blk, idCall, 1, output);
  }
  return self;
}

/*
 * params.language = "auto" | "en", etc...
 */
static VALUE ruby_whisper_params_set_language(VALUE self, VALUE value) {
  ruby_whisper_params *rwp;
  Data_Get_Struct(self, ruby_whisper_params, rwp);
  if (value == Qfalse || value == Qnil) {
    rwp->params.language = "auto";
  } else {
    rwp->params.language = StringValueCStr(value);
  }
  return value;
}
static VALUE ruby_whisper_params_get_language(VALUE self) {
  ruby_whisper_params *rwp;
  Data_Get_Struct(self, ruby_whisper_params, rwp);
  if (rwp->params.language) {
    return rb_str_new2(rwp->params.language);
  } else {
    return rb_str_new2("auto");
  }
}
static VALUE ruby_whisper_params_set_translate(VALUE self, VALUE value) {
  BOOL_PARAMS_SETTER(self, translate, value)
}
static VALUE ruby_whisper_params_get_translate(VALUE self) {
  BOOL_PARAMS_GETTER(self, translate)
}
static VALUE ruby_whisper_params_set_no_context(VALUE self, VALUE value) {
  BOOL_PARAMS_SETTER(self, no_context, value)
}
static VALUE ruby_whisper_params_get_no_context(VALUE self) {
  BOOL_PARAMS_GETTER(self, no_context)
}
static VALUE ruby_whisper_params_set_single_segment(VALUE self, VALUE value) {
  BOOL_PARAMS_SETTER(self, single_segment, value)
}
static VALUE ruby_whisper_params_get_single_segment(VALUE self) {
  BOOL_PARAMS_GETTER(self, single_segment)
}
static VALUE ruby_whisper_params_set_print_special(VALUE self, VALUE value) {
  BOOL_PARAMS_SETTER(self, print_special, value)
}
static VALUE ruby_whisper_params_get_print_special(VALUE self) {
  BOOL_PARAMS_GETTER(self, print_special)
}
static VALUE ruby_whisper_params_set_print_progress(VALUE self, VALUE value) {
  BOOL_PARAMS_SETTER(self, print_progress, value)
}
static VALUE ruby_whisper_params_get_print_progress(VALUE self) {
  BOOL_PARAMS_GETTER(self, print_progress)
}
static VALUE ruby_whisper_params_set_print_realtime(VALUE self, VALUE value) {
  BOOL_PARAMS_SETTER(self, print_realtime, value)
}
static VALUE ruby_whisper_params_get_print_realtime(VALUE self) {
  BOOL_PARAMS_GETTER(self, print_realtime)
}
static VALUE ruby_whisper_params_set_print_timestamps(VALUE self, VALUE value) {
  BOOL_PARAMS_SETTER(self, print_timestamps, value)
}
static VALUE ruby_whisper_params_get_print_timestamps(VALUE self) {
  BOOL_PARAMS_GETTER(self, print_timestamps)
}
static VALUE ruby_whisper_params_set_suppress_blank(VALUE self, VALUE value) {
  BOOL_PARAMS_SETTER(self, suppress_blank, value)
}
static VALUE ruby_whisper_params_get_suppress_blank(VALUE self) {
  BOOL_PARAMS_GETTER(self, suppress_blank)
}
static VALUE ruby_whisper_params_set_suppress_non_speech_tokens(VALUE self, VALUE value) {
  BOOL_PARAMS_SETTER(self, suppress_non_speech_tokens, value)
}
static VALUE ruby_whisper_params_get_suppress_non_speech_tokens(VALUE self) {
  BOOL_PARAMS_GETTER(self, suppress_non_speech_tokens)
}
static VALUE ruby_whisper_params_get_token_timestamps(VALUE self) {
  BOOL_PARAMS_GETTER(self, token_timestamps)
}
static VALUE ruby_whisper_params_set_token_timestamps(VALUE self, VALUE value) {
  BOOL_PARAMS_SETTER(self, token_timestamps, value)
}
static VALUE ruby_whisper_params_get_split_on_word(VALUE self) {
  BOOL_PARAMS_GETTER(self, split_on_word)
}
static VALUE ruby_whisper_params_set_split_on_word(VALUE self, VALUE value) {
  BOOL_PARAMS_SETTER(self, split_on_word, value)
}
static VALUE ruby_whisper_params_get_speed_up(VALUE self) {
  BOOL_PARAMS_GETTER(self, speed_up)
}
static VALUE ruby_whisper_params_set_speed_up(VALUE self, VALUE value) {
  BOOL_PARAMS_SETTER(self, speed_up, value)
}
static VALUE ruby_whisper_params_get_diarize(VALUE self) {
  ruby_whisper_params *rwp;
  Data_Get_Struct(self, ruby_whisper_params, rwp);
  if (rwp->diarize) {
    return Qtrue;
  } else {
    return Qfalse;
  }
}
static VALUE ruby_whisper_params_set_diarize(VALUE self, VALUE value) {
  ruby_whisper_params *rwp;
  Data_Get_Struct(self, ruby_whisper_params, rwp);
  if (value == Qfalse || value == Qnil) {
    rwp->diarize = false;
  } else {
    rwp->diarize = true;
  }
  return value;
}

static VALUE ruby_whisper_params_get_offset(VALUE self) {
  ruby_whisper_params *rwp;
  Data_Get_Struct(self, ruby_whisper_params, rwp);
  return INT2NUM(rwp->params.offset_ms);
}
static VALUE ruby_whisper_params_set_offset(VALUE self, VALUE value) {
  ruby_whisper_params *rwp;
  Data_Get_Struct(self, ruby_whisper_params, rwp);
  rwp->params.offset_ms = NUM2INT(value);
  return value;
}
static VALUE ruby_whisper_params_get_duration(VALUE self) {
  ruby_whisper_params *rwp;
  Data_Get_Struct(self, ruby_whisper_params, rwp);
  return INT2NUM(rwp->params.duration_ms);
}
static VALUE ruby_whisper_params_set_duration(VALUE self, VALUE value) {
  ruby_whisper_params *rwp;
  Data_Get_Struct(self, ruby_whisper_params, rwp);
  rwp->params.duration_ms = NUM2INT(value);
  return value;
}

static VALUE ruby_whisper_params_get_max_text_tokens(VALUE self) {
  ruby_whisper_params *rwp;
  Data_Get_Struct(self, ruby_whisper_params, rwp);
  return INT2NUM(rwp->params.n_max_text_ctx);
}
static VALUE ruby_whisper_params_set_max_text_tokens(VALUE self, VALUE value) {
  ruby_whisper_params *rwp;
  Data_Get_Struct(self, ruby_whisper_params, rwp);
  rwp->params.n_max_text_ctx = NUM2INT(value);
  return value;
}

void Init_whisper() {
  mWhisper = rb_define_module("Whisper");
  cContext = rb_define_class_under(mWhisper, "Context", rb_cObject);
  cParams  = rb_define_class_under(mWhisper, "Params", rb_cObject);

  rb_define_alloc_func(cContext, ruby_whisper_allocate);
  rb_define_method(cContext, "initialize", ruby_whisper_initialize, -1);

  rb_define_method(cContext, "transcribe", ruby_whisper_transcribe, -1);

  rb_define_alloc_func(cParams, ruby_whisper_params_allocate);

  rb_define_method(cParams, "language=", ruby_whisper_params_set_language, 1);
  rb_define_method(cParams, "language", ruby_whisper_params_get_language, 0);
  rb_define_method(cParams, "translate=", ruby_whisper_params_set_translate, 1);
  rb_define_method(cParams, "translate", ruby_whisper_params_get_translate, 0);
  rb_define_method(cParams, "no_context=", ruby_whisper_params_set_no_context, 1);
  rb_define_method(cParams, "no_context", ruby_whisper_params_get_no_context, 0);
  rb_define_method(cParams, "single_segment=", ruby_whisper_params_set_single_segment, 1);
  rb_define_method(cParams, "single_segment", ruby_whisper_params_get_single_segment, 0);
  rb_define_method(cParams, "print_special", ruby_whisper_params_get_print_special, 0);
  rb_define_method(cParams, "print_special=", ruby_whisper_params_set_print_special, 1);
  rb_define_method(cParams, "print_progress", ruby_whisper_params_get_print_progress, 0);
  rb_define_method(cParams, "print_progress=", ruby_whisper_params_set_print_progress, 1);
  rb_define_method(cParams, "print_realtime", ruby_whisper_params_get_print_realtime, 0);
  rb_define_method(cParams, "print_realtime=", ruby_whisper_params_set_print_realtime, 1);
  rb_define_method(cParams, "print_timestamps", ruby_whisper_params_get_print_timestamps, 0);
  rb_define_method(cParams, "print_timestamps=", ruby_whisper_params_set_print_timestamps, 1);
  rb_define_method(cParams, "suppress_blank", ruby_whisper_params_get_suppress_blank, 0);
  rb_define_method(cParams, "suppress_blank=", ruby_whisper_params_set_suppress_blank, 1);
  rb_define_method(cParams, "suppress_non_speech_tokens", ruby_whisper_params_get_suppress_non_speech_tokens, 0);
  rb_define_method(cParams, "suppress_non_speech_tokens=", ruby_whisper_params_set_suppress_non_speech_tokens, 1);
  rb_define_method(cParams, "token_timestamps", ruby_whisper_params_get_token_timestamps, 0);
  rb_define_method(cParams, "token_timestamps=", ruby_whisper_params_set_token_timestamps, 1);
  rb_define_method(cParams, "split_on_word", ruby_whisper_params_get_split_on_word, 0);
  rb_define_method(cParams, "split_on_word=", ruby_whisper_params_set_split_on_word, 1);
  rb_define_method(cParams, "speed_up", ruby_whisper_params_get_speed_up, 0);
  rb_define_method(cParams, "speed_up=", ruby_whisper_params_set_speed_up, 1);
  rb_define_method(cParams, "diarize", ruby_whisper_params_get_diarize, 0);
  rb_define_method(cParams, "diarize=", ruby_whisper_params_set_diarize, 1);

  rb_define_method(cParams, "offset", ruby_whisper_params_get_offset, 0);
  rb_define_method(cParams, "offset=", ruby_whisper_params_set_offset, 1);
  rb_define_method(cParams, "duration", ruby_whisper_params_get_duration, 0);
  rb_define_method(cParams, "duration=", ruby_whisper_params_set_duration, 1);

  rb_define_method(cParams, "max_text_tokens", ruby_whisper_params_get_max_text_tokens, 0);
  rb_define_method(cParams, "max_text_tokens=", ruby_whisper_params_set_max_text_tokens, 1);
}
#ifdef __cplusplus
}
#endif
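Stripped of the Ruby glue and the dr_wav decoding above, the core of `ruby_whisper_transcribe` is the standard whisper.cpp flow. A hedged sketch of just that part, using only the calls that appear in the file:

```cpp
#include "whisper.h"
#include <cstdio>
#include <string>
#include <vector>

// pcmf32: 16 kHz mono float samples, e.g. produced by the dr_wav conversion shown above
std::string transcribe(struct whisper_context * ctx, const std::vector<float> & pcmf32) {
    whisper_full_params params = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
    params.language = "auto";

    std::string output;
    if (whisper_full_parallel(ctx, params, pcmf32.data(), (int) pcmf32.size(), 1) != 0) {
        fprintf(stderr, "failed to process audio\n");
        return output;
    }

    const int n_segments = whisper_full_n_segments(ctx);
    for (int i = 0; i < n_segments; ++i) {
        output += whisper_full_get_segment_text(ctx, i);
    }
    return output;
}
```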
15
bindings/ruby/ext/ruby_whisper.h
Normal file
@@ -0,0 +1,15 @@
#ifndef __RUBY_WHISPER_H
#define __RUBY_WHISPER_H

#include "whisper.h"

typedef struct {
  struct whisper_context *context;
} ruby_whisper;

typedef struct {
  struct whisper_full_params params;
  bool diarize;
} ruby_whisper_params;

#endif
138
bindings/ruby/tests/test_whisper.rb
Normal file
@@ -0,0 +1,138 @@
TOPDIR = File.expand_path(File.join(File.dirname(__FILE__), '..'))
EXTDIR = File.join(TOPDIR, 'ext')
#$LIBDIR = File.join(TOPDIR, 'lib')
#$:.unshift(LIBDIR)
$:.unshift(EXTDIR)

require 'whisper'
require 'test/unit'

class TestWhisper < Test::Unit::TestCase
  def setup
    @params = Whisper::Params.new
  end

  def test_language
    @params.language = "en"
    assert_equal @params.language, "en"
    @params.language = "auto"
    assert_equal @params.language, "auto"
  end

  def test_offset
    @params.offset = 10_000
    assert_equal @params.offset, 10_000
    @params.offset = 0
    assert_equal @params.offset, 0
  end

  def test_duration
    @params.duration = 60_000
    assert_equal @params.duration, 60_000
    @params.duration = 0
    assert_equal @params.duration, 0
  end

  def test_max_text_tokens
    @params.max_text_tokens = 300
    assert_equal @params.max_text_tokens, 300
    @params.max_text_tokens = 0
    assert_equal @params.max_text_tokens, 0
  end

  def test_translate
    @params.translate = true
    assert @params.translate
    @params.translate = false
    assert !@params.translate
  end

  def test_no_context
    @params.no_context = true
    assert @params.no_context
    @params.no_context = false
    assert !@params.no_context
  end

  def test_single_segment
    @params.single_segment = true
    assert @params.single_segment
    @params.single_segment = false
    assert !@params.single_segment
  end

  def test_print_special
    @params.print_special = true
    assert @params.print_special
    @params.print_special = false
    assert !@params.print_special
  end

  def test_print_progress
    @params.print_progress = true
    assert @params.print_progress
    @params.print_progress = false
    assert !@params.print_progress
  end

  def test_print_realtime
    @params.print_realtime = true
    assert @params.print_realtime
    @params.print_realtime = false
    assert !@params.print_realtime
  end

  def test_print_timestamps
    @params.print_timestamps = true
    assert @params.print_timestamps
    @params.print_timestamps = false
    assert !@params.print_timestamps
  end

  def test_suppress_blank
    @params.suppress_blank = true
    assert @params.suppress_blank
    @params.suppress_blank = false
    assert !@params.suppress_blank
  end

  def test_suppress_non_speech_tokens
    @params.suppress_non_speech_tokens = true
    assert @params.suppress_non_speech_tokens
    @params.suppress_non_speech_tokens = false
    assert !@params.suppress_non_speech_tokens
  end

  def test_token_timestamps
    @params.token_timestamps = true
    assert @params.token_timestamps
    @params.token_timestamps = false
    assert !@params.token_timestamps
  end

  def test_split_on_word
    @params.split_on_word = true
    assert @params.split_on_word
    @params.split_on_word = false
    assert !@params.split_on_word
  end

  def test_speed_up
    @params.speed_up = true
    assert @params.speed_up
    @params.speed_up = false
    assert !@params.speed_up
  end

  def test_whisper
    @whisper = Whisper::Context.new(File.join(TOPDIR, '..', '..', 'models', 'ggml-base.en.bin'))
    params = Whisper::Params.new
    params.print_timestamps = false

    jfk = File.join(TOPDIR, '..', '..', 'samples', 'jfk.wav')
    @whisper.transcribe(jfk, params) {|text|
      assert_match /ask not what your country can do for you, ask what you can do for your country/, text
    }
  end

end
@@ -14,6 +14,37 @@ if (WHISPER_SUPPORT_SDL2)
    message(STATUS "SDL2_LIBRARIES = ${SDL2_LIBRARIES}")
endif()

# common

set(TARGET common)

add_library(${TARGET} STATIC
    common.h
    common.cpp
    )

include(DefaultTargetOptions)

set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)

if (WHISPER_SUPPORT_SDL2)
    # common-sdl

    set(TARGET common-sdl)

    add_library(${TARGET} STATIC
        common-sdl.h
        common-sdl.cpp
        )

    include(DefaultTargetOptions)

    target_include_directories(${TARGET} PUBLIC ${SDL2_INCLUDE_DIRS})
    target_link_libraries(${TARGET} PRIVATE ${SDL2_LIBRARIES})

    set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
endif()

# examples

include_directories(${CMAKE_CURRENT_SOURCE_DIR})

@@ -24,6 +55,8 @@ if (EMSCRIPTEN)
    add_subdirectory(command.wasm)
    add_subdirectory(talk.wasm)
    add_subdirectory(bench.wasm)
elseif(CMAKE_JS_VERSION)
    add_subdirectory(addon.node)
else()
    add_subdirectory(main)
    add_subdirectory(stream)
3
examples/addon.node/.gitignore
vendored
Normal file
@@ -0,0 +1,3 @@
.idea
node_modules
build
31
examples/addon.node/CMakeLists.txt
Normal file
@@ -0,0 +1,31 @@
set(TARGET whisper-addon)

# Base settings
#==================================================================
# env var supported by cmake-js
add_definitions(-DNAPI_VERSION=4)
include_directories(${CMAKE_JS_INC})
#==================================================================

add_library(${TARGET} SHARED ${CMAKE_JS_SRC} addon.cpp)
set_target_properties(${TARGET} PROPERTIES PREFIX "" SUFFIX ".node")

include(DefaultTargetOptions)

# Include N-API wrappers
#==================================================================
execute_process(COMMAND node -p "require('node-addon-api').include"
        WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
        OUTPUT_VARIABLE NODE_ADDON_API_DIR
        )
string(REPLACE "\n" "" NODE_ADDON_API_DIR ${NODE_ADDON_API_DIR})
string(REPLACE "\"" "" NODE_ADDON_API_DIR ${NODE_ADDON_API_DIR})
target_include_directories(${TARGET} PRIVATE ${NODE_ADDON_API_DIR})
#==================================================================

target_link_libraries(${TARGET} ${CMAKE_JS_LIB} common whisper ${CMAKE_THREAD_LIBS_INIT})

if(MSVC AND CMAKE_JS_NODELIB_DEF AND CMAKE_JS_NODELIB_TARGET)
    # Generate node.lib
    execute_process(COMMAND ${CMAKE_AR} /def:${CMAKE_JS_NODELIB_DEF} /out:${CMAKE_JS_NODELIB_TARGET} ${CMAKE_STATIC_LINKER_FLAGS})
endif()
37
examples/addon.node/README.md
Normal file
@@ -0,0 +1,37 @@
# addon

This is an addon demo that can **run whisper model inference in `node` and `electron` environments**, based on [cmake-js](https://github.com/cmake-js/cmake-js).
It can be used as a reference for using the whisper.cpp project in other node projects.

## Install

```shell
npm install
```

## Compile

Make sure you are in the project root directory and compile with cmake-js.

```shell
npx cmake-js compile -T whisper-addon -B Release
```

For Electron addons and other cmake-js options, see [cmake-js](https://github.com/cmake-js/cmake-js); very few configuration changes are needed.

> For example, to point at a specific cmake binary:
> ```shell
> npx cmake-js compile -c 'xxx/cmake' -T whisper-addon -B Release
> ```

## Run

```shell
cd examples/addon.node

node index.js --language='language' --model='model-path' --fname_inp='file-path'
```

Because this is a simple demo, only the parameters above are exposed in the node environment.

Other parameters can also be specified in the node environment.
15
examples/addon.node/__test__/whisper.spec.js
Normal file
@@ -0,0 +1,15 @@
const path = require('path');
const { whisper } = require(path.join(__dirname, '../../../build/Release/whisper-addon'));

const whisperParamsMock = {
  language: 'en',
  model: path.join(__dirname, '../../../models/ggml-base.en.bin'),
  fname_inp: path.join(__dirname, '../../../samples/jfk.wav'),
};

describe("Run whisper.node", () => {

  test("it should receive a non-empty value", () => {
    expect(whisper(whisperParamsMock).length).toBeGreaterThan(0);
  });
});
342
examples/addon.node/addon.cpp
Normal file
@@ -0,0 +1,342 @@
#include "napi.h"
#include "common.h"

#include "whisper.h"

#include <string>
#include <thread>
#include <vector>
#include <cmath>
#include <cstdint>

struct whisper_params {
    int32_t n_threads    = std::min(4, (int32_t) std::thread::hardware_concurrency());
    int32_t n_processors = 1;
    int32_t offset_t_ms  = 0;
    int32_t offset_n     = 0;
    int32_t duration_ms  = 0;
    int32_t max_context  = -1;
    int32_t max_len      = 0;
    int32_t best_of      = 5;
    int32_t beam_size    = -1;

    float word_thold    = 0.01f;
    float entropy_thold = 2.4f;
    float logprob_thold = -1.0f;

    bool speed_up       = false;
    bool translate      = false;
    bool diarize        = false;
    bool output_txt     = false;
    bool output_vtt     = false;
    bool output_srt     = false;
    bool output_wts     = false;
    bool output_csv     = false;
    bool print_special  = false;
    bool print_colors   = false;
    bool print_progress = false;
    bool no_timestamps  = false;

    std::string language = "en";
    std::string prompt;
    std::string model    = "../../ggml-large.bin";

    std::vector<std::string> fname_inp = {};
    std::vector<std::string> fname_out = {};
};

struct whisper_print_user_data {
    const whisper_params * params;

    const std::vector<std::vector<float>> * pcmf32s;
};

//  500 -> 00:05.000
// 6000 -> 01:00.000
std::string to_timestamp(int64_t t, bool comma = false) {
    int64_t msec = t * 10;
    int64_t hr = msec / (1000 * 60 * 60);
    msec = msec - hr * (1000 * 60 * 60);
    int64_t min = msec / (1000 * 60);
    msec = msec - min * (1000 * 60);
    int64_t sec = msec / 1000;
    msec = msec - sec * 1000;

    char buf[32];
    snprintf(buf, sizeof(buf), "%02d:%02d:%02d%s%03d", (int) hr, (int) min, (int) sec, comma ? "," : ".", (int) msec);

    return std::string(buf);
}

int timestamp_to_sample(int64_t t, int n_samples) {
    return std::max(0, std::min((int) n_samples - 1, (int) ((t*WHISPER_SAMPLE_RATE)/100)));
}

void whisper_print_segment_callback(struct whisper_context * ctx, struct whisper_state * state, int n_new, void * user_data) {
    const auto & params  = *((whisper_print_user_data *) user_data)->params;
    const auto & pcmf32s = *((whisper_print_user_data *) user_data)->pcmf32s;

    const int n_segments = whisper_full_n_segments(ctx);

    std::string speaker = "";

    int64_t t0;
    int64_t t1;

    // print the last n_new segments
    const int s0 = n_segments - n_new;

    if (s0 == 0) {
        printf("\n");
    }

    for (int i = s0; i < n_segments; i++) {
        if (!params.no_timestamps || params.diarize) {
            t0 = whisper_full_get_segment_t0(ctx, i);
            t1 = whisper_full_get_segment_t1(ctx, i);
        }

        if (!params.no_timestamps) {
            printf("[%s --> %s] ", to_timestamp(t0).c_str(), to_timestamp(t1).c_str());
        }

        if (params.diarize && pcmf32s.size() == 2) {
            const int64_t n_samples = pcmf32s[0].size();

            const int64_t is0 = timestamp_to_sample(t0, n_samples);
            const int64_t is1 = timestamp_to_sample(t1, n_samples);

            double energy0 = 0.0f;
            double energy1 = 0.0f;

            for (int64_t j = is0; j < is1; j++) {
                energy0 += fabs(pcmf32s[0][j]);
                energy1 += fabs(pcmf32s[1][j]);
            }

            if (energy0 > 1.1*energy1) {
                speaker = "(speaker 0)";
            } else if (energy1 > 1.1*energy0) {
                speaker = "(speaker 1)";
            } else {
                speaker = "(speaker ?)";
            }

            //printf("is0 = %lld, is1 = %lld, energy0 = %f, energy1 = %f, %s\n", is0, is1, energy0, energy1, speaker.c_str());
        }

        // colorful print bug
        //
        const char * text = whisper_full_get_segment_text(ctx, i);
        printf("%s%s", speaker.c_str(), text);

        // with timestamps or speakers: each segment on new line
        if (!params.no_timestamps || params.diarize) {
            printf("\n");
        }

        fflush(stdout);
    }
}

int run(whisper_params &params, std::vector<std::vector<std::string>> &result) {
    if (params.fname_inp.empty()) {
        fprintf(stderr, "error: no input files specified\n");
        return 2;
    }

    if (params.language != "auto" && whisper_lang_id(params.language.c_str()) == -1) {
        fprintf(stderr, "error: unknown language '%s'\n", params.language.c_str());
        exit(0);
    }

    // whisper init

    struct whisper_context * ctx = whisper_init_from_file(params.model.c_str());

    if (ctx == nullptr) {
        fprintf(stderr, "error: failed to initialize whisper context\n");
        return 3;
    }

    // initial prompt
    std::vector<whisper_token> prompt_tokens;

    if (!params.prompt.empty()) {
        prompt_tokens.resize(1024);
        prompt_tokens.resize(whisper_tokenize(ctx, params.prompt.c_str(), prompt_tokens.data(), prompt_tokens.size()));

        fprintf(stderr, "\n");
        fprintf(stderr, "initial prompt: '%s'\n", params.prompt.c_str());
        fprintf(stderr, "initial tokens: [ ");
        for (int i = 0; i < (int) prompt_tokens.size(); ++i) {
            fprintf(stderr, "%d ", prompt_tokens[i]);
        }
        fprintf(stderr, "]\n");
    }

    for (int f = 0; f < (int) params.fname_inp.size(); ++f) {
        const auto fname_inp = params.fname_inp[f];
        const auto fname_out = f < (int)params.fname_out.size() && !params.fname_out[f].empty() ? params.fname_out[f] : params.fname_inp[f];

        std::vector<float> pcmf32; // mono-channel F32 PCM
        std::vector<std::vector<float>> pcmf32s; // stereo-channel F32 PCM

        if (!::read_wav(fname_inp, pcmf32, pcmf32s, params.diarize)) {
            fprintf(stderr, "error: failed to read WAV file '%s'\n", fname_inp.c_str());
            continue;
        }

        // print system information
        {
            fprintf(stderr, "\n");
            fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                    params.n_threads*params.n_processors, std::thread::hardware_concurrency(), whisper_print_system_info());
        }

        // print some info about the processing
        {
            fprintf(stderr, "\n");
            if (!whisper_is_multilingual(ctx)) {
                if (params.language != "en" || params.translate) {
                    params.language = "en";
                    params.translate = false;
                    fprintf(stderr, "%s: WARNING: model is not multilingual, ignoring language and translation options\n", __func__);
                }
            }
            fprintf(stderr, "%s: processing '%s' (%d samples, %.1f sec), %d threads, %d processors, lang = %s, task = %s, timestamps = %d ...\n",
                    __func__, fname_inp.c_str(), int(pcmf32.size()), float(pcmf32.size())/WHISPER_SAMPLE_RATE,
                    params.n_threads, params.n_processors,
                    params.language.c_str(),
                    params.translate ? "translate" : "transcribe",
                    params.no_timestamps ? 0 : 1);

            fprintf(stderr, "\n");
        }

        // run the inference
        {
            whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);

            wparams.strategy = params.beam_size > 1 ? WHISPER_SAMPLING_BEAM_SEARCH : WHISPER_SAMPLING_GREEDY;

            wparams.print_realtime   = false;
            wparams.print_progress   = params.print_progress;
            wparams.print_timestamps = !params.no_timestamps;
            wparams.print_special    = params.print_special;
            wparams.translate        = params.translate;
            wparams.language         = params.language.c_str();
            wparams.n_threads        = params.n_threads;
            wparams.n_max_text_ctx   = params.max_context >= 0 ? params.max_context : wparams.n_max_text_ctx;
            wparams.offset_ms        = params.offset_t_ms;
            wparams.duration_ms      = params.duration_ms;

            wparams.token_timestamps = params.output_wts || params.max_len > 0;
            wparams.thold_pt         = params.word_thold;
            wparams.entropy_thold    = params.entropy_thold;
            wparams.logprob_thold    = params.logprob_thold;
            wparams.max_len          = params.output_wts && params.max_len == 0 ? 60 : params.max_len;

            wparams.speed_up         = params.speed_up;

            wparams.greedy.best_of        = params.best_of;
            wparams.beam_search.beam_size = params.beam_size;

            wparams.prompt_tokens   = prompt_tokens.empty() ? nullptr : prompt_tokens.data();
            wparams.prompt_n_tokens = prompt_tokens.empty() ? 0 : prompt_tokens.size();

            whisper_print_user_data user_data = { &params, &pcmf32s };

            // this callback is called on each new segment
            if (!wparams.print_realtime) {
                wparams.new_segment_callback           = whisper_print_segment_callback;
                wparams.new_segment_callback_user_data = &user_data;
            }

            // example for abort mechanism
            // in this example, we do not abort the processing, but we could if the flag is set to true
            // the callback is called before every encoder run - if it returns false, the processing is aborted
            {
                static bool is_aborted = false; // NOTE: this should be atomic to avoid data race

                wparams.encoder_begin_callback = [](struct whisper_context * /*ctx*/, struct whisper_state * /*state*/, void * user_data) {
                    bool is_aborted = *(bool*)user_data;
                    return !is_aborted;
                };
                wparams.encoder_begin_callback_user_data = &is_aborted;
            }

            if (whisper_full_parallel(ctx, wparams, pcmf32.data(), pcmf32.size(), params.n_processors) != 0) {
                fprintf(stderr, "failed to process audio\n");
                return 10;
            }
        }
    }

    const int n_segments = whisper_full_n_segments(ctx);
    result.resize(n_segments);
    for (int i = 0; i < n_segments; ++i) {
        const char * text = whisper_full_get_segment_text(ctx, i);
        const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
        const int64_t t1 = whisper_full_get_segment_t1(ctx, i);

        result[i].emplace_back(to_timestamp(t0, true));
        result[i].emplace_back(to_timestamp(t1, true));
        result[i].emplace_back(text);
    }

    whisper_print_timings(ctx);
    whisper_free(ctx);

    return 0;
}

Napi::Object whisper(const Napi::CallbackInfo& info) {
    Napi::Env env = info.Env();
    if (info.Length() <= 0 || !info[0].IsObject()) {
        Napi::TypeError::New(env, "object expected").ThrowAsJavaScriptException();
    }
    whisper_params params;
    std::vector<std::vector<std::string>> result;

    Napi::Object whisper_params = info[0].As<Napi::Object>();
    std::string language = whisper_params.Get("language").As<Napi::String>();
    std::string model = whisper_params.Get("model").As<Napi::String>();
    std::string input = whisper_params.Get("fname_inp").As<Napi::String>();

    params.language = language;
    params.model = model;
    params.fname_inp.emplace_back(input);

    // run model
    run(params, result);

    fprintf(stderr, "RESULT:\n");
    for (auto sentence:result) {
        fprintf(stderr, "t0: %s, t1: %s, content: %s \n",
                sentence[0].c_str(), sentence[1].c_str(), sentence[2].c_str());
    }

    Napi::Object res = Napi::Array::New(env, result.size());
    for (uint64_t i = 0; i < result.size(); ++i) {
        Napi::Object tmp = Napi::Array::New(env, 3);
        for (uint64_t j = 0; j < 3; ++j) {
            tmp[j] = Napi::String::New(env, result[i][j]);
        }
        res[i] = tmp;
    }

    return res;
}


Napi::Object Init(Napi::Env env, Napi::Object exports) {
    exports.Set(
        Napi::String::New(env, "whisper"),
        Napi::Function::New(env, whisper)
    );
    return exports;
}

NODE_API_MODULE(whisper, Init);
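The abort example in `addon.cpp` above reuses a plain `static bool` and itself notes that it "should be atomic to avoid data race". A hedged sketch of the same mechanism with `std::atomic`, which keeps the capture-less lambda convertible to the plain function pointer the callback field expects:

```cpp
#include "whisper.h"
#include <atomic>

// shared flag that another thread (or a signal handler) may set to request cancellation
static std::atomic<bool> g_is_aborted{false};

static void setup_abort(whisper_full_params & wparams) {
    wparams.encoder_begin_callback = [](struct whisper_context * /*ctx*/, struct whisper_state * /*state*/, void * user_data) {
        const auto * flag = static_cast<std::atomic<bool> *>(user_data);
        // returning false aborts processing before the next encoder run
        return !flag->load(std::memory_order_relaxed);
    };
    wparams.encoder_begin_callback_user_data = &g_is_aborted;
}

// elsewhere: g_is_aborted.store(true); // request abort
```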
27
examples/addon.node/index.js
Normal file
@@ -0,0 +1,27 @@
const path = require('path');
const { whisper } = require(path.join(__dirname, '../../build/Release/whisper-addon'));

const whisperParams = {
  language: 'en',
  model: path.join(__dirname, '../../models/ggml-base.en.bin'),
  fname_inp: '',
};

const arguments = process.argv.slice(2);
const params = Object.fromEntries(
  arguments.reduce((pre, item) => {
    if (item.startsWith("--")) {
      return [...pre, item.slice(2).split("=")];
    }
    return pre;
  }, []),
);

for (const key in params) {
  if (whisperParams.hasOwnProperty(key)) {
    whisperParams[key] = params[key];
  }
}

console.log('whisperParams =', whisperParams);
console.log(whisper(whisperParams));
16
examples/addon.node/package.json
Normal file
@@ -0,0 +1,16 @@
{
  "name": "whisper-addon",
  "version": "0.0.0",
  "description": "",
  "main": "index.js",
  "author": "Qanhe Chen",
  "license": "MIT",
  "scripts": {
    "test": "jest"
  },
  "devDependencies": {
    "cmake-js": "^7.1.1",
    "jest": "^29.4.0",
    "node-addon-api": "^5.0.0"
  }
}
@@ -11,6 +11,7 @@ add_executable(${TARGET}
include(DefaultTargetOptions)

target_link_libraries(${TARGET} PRIVATE
    common
    whisper
    )
@@ -1,4 +1,5 @@
#include "ggml.h"
#include "common.h"
#include "whisper.h"

#include <emscripten.h>

@@ -27,24 +28,6 @@ std::string g_transcribed = "";

std::vector<float> g_pcmf32;

static std::string trim(const std::string & s) {
    std::regex e("^\\s+|\\s+$");
    return std::regex_replace(s, e, "");
}

static void high_pass_filter(std::vector<float> & data, float cutoff, float sample_rate) {
    const float rc = 1.0f / (2.0f * M_PI * cutoff);
    const float dt = 1.0f / sample_rate;
    const float alpha = dt / (rc + dt);

    float y = data[0];

    for (size_t i = 1; i < data.size(); i++) {
        y = alpha * (y + data[i] - data[i - 1]);
        data[i] = y;
    }
}

// compute similarity between two strings using Levenshtein distance
static float similarity(const std::string & s0, const std::string & s1) {
    const size_t len0 = s0.size() + 1;

@@ -75,44 +58,6 @@ void command_set_status(const std::string & status) {
    g_status = status;
}

bool command_vad_simple(std::vector<float> & pcmf32, int sample_rate, int last_ms, float vad_thold, float freq_thold, bool verbose) {
    const int n_samples = pcmf32.size();
    const int n_samples_last = (sample_rate * last_ms) / 1000;

    if (n_samples_last >= n_samples) {
        // not enough samples - assume no speech
        return false;
    }

    if (freq_thold > 0.0f) {
        high_pass_filter(pcmf32, freq_thold, sample_rate);
    }

    float energy_all = 0.0f;
    float energy_last = 0.0f;

    for (size_t i = 0; i < n_samples; i++) {
        energy_all += fabsf(pcmf32[i]);

        if (i >= n_samples - n_samples_last) {
            energy_last += fabsf(pcmf32[i]);
        }
    }

    energy_all /= n_samples;
    energy_last /= n_samples_last;

    if (verbose) {
        fprintf(stderr, "%s: energy_all: %f, energy_last: %f, vad_thold: %f, freq_thold: %f\n", __func__, energy_all, energy_last, vad_thold, freq_thold);
    }

    if (energy_last > vad_thold*energy_all) {
        return false;
    }

    return true;
}

std::string command_transcribe(whisper_context * ctx, const whisper_full_params & wparams, const std::vector<float> & pcmf32, float & prob, int64_t & t_ms) {
    const auto t_start = std::chrono::high_resolution_clock::now();

@@ -155,7 +100,7 @@ void command_get_audio(int ms, int sample_rate, std::vector<float> & audio) {
    const int64_t n_samples = (ms * sample_rate) / 1000;

    int64_t n_take = 0;
    if (g_pcmf32.size() < n_samples) {
    if (n_samples > (int) g_pcmf32.size()) {
        n_take = g_pcmf32.size();
    } else {
        n_take = n_samples;

@@ -187,7 +132,6 @@ void command_main(size_t index) {

    printf("command: using %d threads\n", wparams.n_threads);

    bool is_running = true;
    bool have_prompt = false;
    bool ask_prompt = true;
    bool print_energy = false;

@@ -233,7 +177,7 @@ void command_main(size_t index) {
        {
            command_get_audio(vad_ms, WHISPER_SAMPLE_RATE, pcmf32_cur);

            if (command_vad_simple(pcmf32_cur, WHISPER_SAMPLE_RATE, 1000, vad_thold, freq_thold, print_energy)) {
            if (::vad_simple(pcmf32_cur, WHISPER_SAMPLE_RATE, 1000, vad_thold, freq_thold, print_energy)) {
                fprintf(stdout, "%s: Speech detected! Processing ...\n", __func__);
                command_set_status("Speech detected! Processing ...");
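The hunks above drop the example's local `high_pass_filter` and `command_vad_simple` helpers and switch to the shared `::vad_simple` from the new `common` library. The removed code documents the heuristic well enough to restate it; a hedged sketch of that energy-based check follows (not the exact `common.cpp` implementation, and without the optional high-pass step):

```cpp
#include <cmath>
#include <vector>

// Return true when the last `last_ms` of audio is much quieter than the buffer average,
// i.e. the speaker has likely finished talking and the segment can be transcribed.
static bool simple_vad(const std::vector<float> & pcmf32, int sample_rate, int last_ms, float vad_thold) {
    const int n_samples      = (int) pcmf32.size();
    const int n_samples_last = (sample_rate * last_ms) / 1000;

    if (n_samples_last >= n_samples) {
        return false; // not enough samples - assume no speech yet
    }

    float energy_all  = 0.0f;
    float energy_last = 0.0f;
    for (int i = 0; i < n_samples; i++) {
        energy_all += fabsf(pcmf32[i]);
        if (i >= n_samples - n_samples_last) {
            energy_last += fabsf(pcmf32[i]);
        }
    }
    energy_all  /= n_samples;
    energy_last /= n_samples_last;

    // speech has ended when the trailing window is quiet relative to the average
    return energy_last <= vad_thold * energy_all;
}
```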
@@ -5,6 +5,5 @@ if (WHISPER_SUPPORT_SDL2)

    include(DefaultTargetOptions)

    target_include_directories(${TARGET} PRIVATE ${SDL2_INCLUDE_DIRS})
    target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${CMAKE_THREAD_LIBS_INIT})
    target_link_libraries(${TARGET} PRIVATE whisper ${SDL2_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
endif ()
|
|||||||
// ref: https://github.com/ggerganov/whisper.cpp/issues/171
|
// ref: https://github.com/ggerganov/whisper.cpp/issues/171
|
||||||
//
|
//
|
||||||
|
|
||||||
|
#include "common.h"
|
||||||
|
#include "common-sdl.h"
|
||||||
#include "whisper.h"
|
#include "whisper.h"
|
||||||
|
|
||||||
#include <SDL.h>
|
|
||||||
#include <SDL_audio.h>
|
|
||||||
|
|
||||||
#include <sstream>
|
#include <sstream>
|
||||||
#include <cassert>
|
#include <cassert>
|
||||||
#include <cstdio>
|
#include <cstdio>
|
||||||
@@ -110,309 +109,6 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
     fprintf(stderr, "\n");
 }

-// [303 lines removed here: the "SDL Audio capture" section (the audio_async class and its
-//  full implementation) together with the trim(), high_pass_filter() and vad_simple()
-//  helpers; this code moves, essentially unchanged, into the new files
-//  examples/common-sdl.h, examples/common-sdl.cpp and examples/common.cpp shown further below]

 std::string transcribe(whisper_context * ctx, const whisper_params & params, const std::vector<float> & pcmf32, float & prob, int64_t & t_ms) {
     const auto t_start = std::chrono::high_resolution_clock::now();
@@ -502,7 +198,7 @@ std::vector<std::string> read_allowed_commands(const std::string & fname) {

     std::string line;
     while (std::getline(ifs, line)) {
-        line = trim(line);
+        line = ::trim(line);
         if (line.empty()) {
             continue;
         }
@@ -526,23 +222,6 @@ std::vector<std::string> get_words(const std::string &txt) {
     return words;
 }

-// returns true if no exit event was received
-bool process_sdl_events() {
-    SDL_Event event;
-    while (SDL_PollEvent(&event)) {
-        switch (event.type) {
-            case SDL_QUIT:
-                {
-                    return false;
-                } break;
-            default:
-                break;
-        }
-    }
-
-    return true;
-}
-
 // command-list mode
 // guide the transcription to match the most likely command from a provided list
 int process_command_list(struct whisper_context * ctx, audio_async &audio, const whisper_params &params) {
@@ -634,14 +313,14 @@ int process_command_list(struct whisper_context * ctx, audio_async &audio, const
     // main loop
     while (is_running) {
         // handle Ctrl + C
-        is_running = process_sdl_events();
+        is_running = sdl_poll_events();

         // delay
         std::this_thread::sleep_for(std::chrono::milliseconds(100));

         audio.get(2000, pcmf32_cur);

-        if (vad_simple(pcmf32_cur, WHISPER_SAMPLE_RATE, 1000, params.vad_thold, params.freq_thold, params.print_energy)) {
+        if (::vad_simple(pcmf32_cur, WHISPER_SAMPLE_RATE, 1000, params.vad_thold, params.freq_thold, params.print_energy)) {
             fprintf(stdout, "%s: Speech detected! Processing ...\n", __func__);

             const auto t_start = std::chrono::high_resolution_clock::now();
@@ -775,7 +454,7 @@ int always_prompt_transcription(struct whisper_context * ctx, audio_async & audi
     // main loop
     while (is_running) {
         // handle Ctrl + C
-        is_running = process_sdl_events();
+        is_running = sdl_poll_events();

         // delay
         std::this_thread::sleep_for(std::chrono::milliseconds(100));
@@ -791,7 +470,7 @@ int always_prompt_transcription(struct whisper_context * ctx, audio_async & audi
         {
             audio.get(2000, pcmf32_cur);

-            if (vad_simple(pcmf32_cur, WHISPER_SAMPLE_RATE, 1000, params.vad_thold, params.freq_thold, params.print_energy)) {
+            if (::vad_simple(pcmf32_cur, WHISPER_SAMPLE_RATE, 1000, params.vad_thold, params.freq_thold, params.print_energy)) {
                 fprintf(stdout, "%s: Speech detected! Processing ...\n", __func__);

                 int64_t t_ms = 0;
@@ -854,7 +533,7 @@ int process_general_transcription(struct whisper_context * ctx, audio_async &aud
     // main loop
     while (is_running) {
         // handle Ctrl + C
-        is_running = process_sdl_events();
+        is_running = sdl_poll_events();

         // delay
         std::this_thread::sleep_for(std::chrono::milliseconds(100));
@@ -870,7 +549,7 @@ int process_general_transcription(struct whisper_context * ctx, audio_async &aud
         {
             audio.get(2000, pcmf32_cur);

-            if (vad_simple(pcmf32_cur, WHISPER_SAMPLE_RATE, 1000, params.vad_thold, params.freq_thold, params.print_energy)) {
+            if (::vad_simple(pcmf32_cur, WHISPER_SAMPLE_RATE, 1000, params.vad_thold, params.freq_thold, params.print_energy)) {
                 fprintf(stdout, "%s: Speech detected! Processing ...\n", __func__);

                 int64_t t_ms = 0;
examples/common-sdl.cpp (new file, 226 lines)
@@ -0,0 +1,226 @@
#include "common-sdl.h"

audio_async::audio_async(int len_ms) {
    m_len_ms = len_ms;

    m_running = false;
}

audio_async::~audio_async() {
    if (m_dev_id_in) {
        SDL_CloseAudioDevice(m_dev_id_in);
    }
}

bool audio_async::init(int capture_id, int sample_rate) {
    SDL_LogSetPriority(SDL_LOG_CATEGORY_APPLICATION, SDL_LOG_PRIORITY_INFO);

    if (SDL_Init(SDL_INIT_AUDIO) < 0) {
        SDL_LogError(SDL_LOG_CATEGORY_APPLICATION, "Couldn't initialize SDL: %s\n", SDL_GetError());
        return false;
    }

    SDL_SetHintWithPriority(SDL_HINT_AUDIO_RESAMPLING_MODE, "medium", SDL_HINT_OVERRIDE);

    {
        int nDevices = SDL_GetNumAudioDevices(SDL_TRUE);
        fprintf(stderr, "%s: found %d capture devices:\n", __func__, nDevices);
        for (int i = 0; i < nDevices; i++) {
            fprintf(stderr, "%s: - Capture device #%d: '%s'\n", __func__, i, SDL_GetAudioDeviceName(i, SDL_TRUE));
        }
    }

    SDL_AudioSpec capture_spec_requested;
    SDL_AudioSpec capture_spec_obtained;

    SDL_zero(capture_spec_requested);
    SDL_zero(capture_spec_obtained);

    capture_spec_requested.freq     = sample_rate;
    capture_spec_requested.format   = AUDIO_F32;
    capture_spec_requested.channels = 1;
    capture_spec_requested.samples  = 1024;
    capture_spec_requested.callback = [](void * userdata, uint8_t * stream, int len) {
        audio_async * audio = (audio_async *) userdata;
        audio->callback(stream, len);
    };
    capture_spec_requested.userdata = this;

    if (capture_id >= 0) {
        fprintf(stderr, "%s: attempt to open capture device %d : '%s' ...\n", __func__, capture_id, SDL_GetAudioDeviceName(capture_id, SDL_TRUE));
        m_dev_id_in = SDL_OpenAudioDevice(SDL_GetAudioDeviceName(capture_id, SDL_TRUE), SDL_TRUE, &capture_spec_requested, &capture_spec_obtained, 0);
    } else {
        fprintf(stderr, "%s: attempt to open default capture device ...\n", __func__);
        m_dev_id_in = SDL_OpenAudioDevice(nullptr, SDL_TRUE, &capture_spec_requested, &capture_spec_obtained, 0);
    }

    if (!m_dev_id_in) {
        fprintf(stderr, "%s: couldn't open an audio device for capture: %s!\n", __func__, SDL_GetError());
        m_dev_id_in = 0;

        return false;
    } else {
        fprintf(stderr, "%s: obtained spec for input device (SDL Id = %d):\n", __func__, m_dev_id_in);
        fprintf(stderr, "%s: - sample rate: %d\n", __func__, capture_spec_obtained.freq);
        fprintf(stderr, "%s: - format: %d (required: %d)\n", __func__, capture_spec_obtained.format,
                capture_spec_requested.format);
        fprintf(stderr, "%s: - channels: %d (required: %d)\n", __func__, capture_spec_obtained.channels,
                capture_spec_requested.channels);
        fprintf(stderr, "%s: - samples per frame: %d\n", __func__, capture_spec_obtained.samples);
    }

    m_sample_rate = capture_spec_obtained.freq;

    m_audio.resize((m_sample_rate*m_len_ms)/1000);

    return true;
}

bool audio_async::resume() {
    if (!m_dev_id_in) {
        fprintf(stderr, "%s: no audio device to resume!\n", __func__);
        return false;
    }

    if (m_running) {
        fprintf(stderr, "%s: already running!\n", __func__);
        return false;
    }

    SDL_PauseAudioDevice(m_dev_id_in, 0);

    m_running = true;

    return true;
}

bool audio_async::pause() {
    if (!m_dev_id_in) {
        fprintf(stderr, "%s: no audio device to pause!\n", __func__);
        return false;
    }

    if (!m_running) {
        fprintf(stderr, "%s: already paused!\n", __func__);
        return false;
    }

    SDL_PauseAudioDevice(m_dev_id_in, 1);

    m_running = false;

    return true;
}

bool audio_async::clear() {
    if (!m_dev_id_in) {
        fprintf(stderr, "%s: no audio device to clear!\n", __func__);
        return false;
    }

    if (!m_running) {
        fprintf(stderr, "%s: not running!\n", __func__);
        return false;
    }

    {
        std::lock_guard<std::mutex> lock(m_mutex);

        m_audio_pos = 0;
        m_audio_len = 0;
    }

    return true;
}

// callback to be called by SDL
void audio_async::callback(uint8_t * stream, int len) {
    if (!m_running) {
        return;
    }

    const size_t n_samples = len / sizeof(float);

    m_audio_new.resize(n_samples);
    memcpy(m_audio_new.data(), stream, n_samples * sizeof(float));

    //fprintf(stderr, "%s: %zu samples, pos %zu, len %zu\n", __func__, n_samples, m_audio_pos, m_audio_len);

    {
        std::lock_guard<std::mutex> lock(m_mutex);

        if (m_audio_pos + n_samples > m_audio.size()) {
            const size_t n0 = m_audio.size() - m_audio_pos;

            memcpy(&m_audio[m_audio_pos], stream, n0 * sizeof(float));
            memcpy(&m_audio[0], &stream[n0], (n_samples - n0) * sizeof(float));

            m_audio_pos = (m_audio_pos + n_samples) % m_audio.size();
            m_audio_len = m_audio.size();
        } else {
            memcpy(&m_audio[m_audio_pos], stream, n_samples * sizeof(float));

            m_audio_pos = (m_audio_pos + n_samples) % m_audio.size();
            m_audio_len = std::min(m_audio_len + n_samples, m_audio.size());
        }
    }
}

void audio_async::get(int ms, std::vector<float> & result) {
    if (!m_dev_id_in) {
        fprintf(stderr, "%s: no audio device to get audio from!\n", __func__);
        return;
    }

    if (!m_running) {
        fprintf(stderr, "%s: not running!\n", __func__);
        return;
    }

    result.clear();

    {
        std::lock_guard<std::mutex> lock(m_mutex);

        if (ms <= 0) {
            ms = m_len_ms;
        }

        size_t n_samples = (m_sample_rate * ms) / 1000;
        if (n_samples > m_audio_len) {
            n_samples = m_audio_len;
        }

        result.resize(n_samples);

        int s0 = m_audio_pos - n_samples;
        if (s0 < 0) {
            s0 += m_audio.size();
        }

        if (s0 + n_samples > m_audio.size()) {
            const size_t n0 = m_audio.size() - s0;

            memcpy(result.data(), &m_audio[s0], n0 * sizeof(float));
            memcpy(&result[n0], &m_audio[0], (n_samples - n0) * sizeof(float));
        } else {
            memcpy(result.data(), &m_audio[s0], n_samples * sizeof(float));
        }
    }
}

bool sdl_poll_events() {
    SDL_Event event;
    while (SDL_PollEvent(&event)) {
        switch (event.type) {
            case SDL_QUIT:
                {
                    return false;
                } break;
            default:
                break;
        }
    }

    return true;
}
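A brief note on the ring buffer above, offered as an illustration rather than anything stated in the diff: init() sizes m_audio to (sample_rate * len_ms) / 1000 samples, callback() writes incoming frames at m_audio_pos and splits the memcpy in two whenever a write crosses the end of the buffer, and get(ms, ...) copies the most recent (m_sample_rate * ms) / 1000 samples walking backwards from the write position, again splitting the copy when the window wraps. With made-up numbers:

    // illustration only: buffer sizing for a 30 s window captured at 16 kHz
    const int    len_ms      = 30*1000;
    const int    sample_rate = 16000;                               // matches WHISPER_SAMPLE_RATE
    const size_t n_buffer    = (size_t(sample_rate)*len_ms)/1000;   // 480000 float samples
    // get(2000, out) would then copy the newest (16000*2000)/1000 = 32000 samples,
    // using two memcpy calls when those samples straddle the wrap-around point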
examples/common-sdl.h (new file, 50 lines)
@@ -0,0 +1,50 @@
#pragma once

#include <SDL.h>
#include <SDL_audio.h>

#include <atomic>
#include <cstdint>
#include <vector>
#include <mutex>

//
// SDL Audio capture
//

class audio_async {
public:
    audio_async(int len_ms);
    ~audio_async();

    bool init(int capture_id, int sample_rate);

    // start capturing audio via the provided SDL callback
    // keep last len_ms seconds of audio in a circular buffer
    bool resume();
    bool pause();
    bool clear();

    // callback to be called by SDL
    void callback(uint8_t * stream, int len);

    // get audio data from the circular buffer
    void get(int ms, std::vector<float> & audio);

private:
    SDL_AudioDeviceID m_dev_id_in = 0;

    int m_len_ms      = 0;
    int m_sample_rate = 0;

    std::atomic_bool m_running;
    std::mutex       m_mutex;

    std::vector<float> m_audio;
    std::vector<float> m_audio_new;
    size_t             m_audio_pos = 0;
    size_t             m_audio_len = 0;
};

// Return false if need to quit
bool sdl_poll_events();
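To show how these pieces fit together, here is a minimal capture loop in the spirit of the refactored command example. It is a sketch only: the 30-second buffer length, the 2-second window and the 0.6 / 100 Hz VAD thresholds are illustrative values, and the actual transcription call is left as a comment.

    #include "common.h"
    #include "common-sdl.h"

    #include <chrono>
    #include <thread>
    #include <vector>

    int main() {
        audio_async audio(30*1000);                    // keep the last 30 s of audio
        if (!audio.init(-1, COMMON_SAMPLE_RATE)) {     // -1 = default capture device, 16 kHz
            return 1;
        }
        audio.resume();

        std::vector<float> pcmf32;
        bool is_running = true;
        while (is_running) {
            is_running = sdl_poll_events();            // false once an SDL_QUIT event arrives
            std::this_thread::sleep_for(std::chrono::milliseconds(100));

            audio.get(2000, pcmf32);                   // most recent 2 s from the ring buffer
            if (::vad_simple(pcmf32, COMMON_SAMPLE_RATE, 1000, 0.6f, 100.0f, false)) {
                // speech followed by ~1 s of relative silence: hand pcmf32 to whisper_full() here
                audio.clear();
            }
        }

        audio.pause();
        return 0;
    }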
examples/common.cpp (new file, 162 lines)
@@ -0,0 +1,162 @@
#include "common.h"

// third-party utilities
// use your favorite implementations
#define DR_WAV_IMPLEMENTATION
#include "dr_wav.h"

#include <cmath>
#include <regex>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

std::string trim(const std::string & s) {
    std::regex e("^\\s+|\\s+$");
    return std::regex_replace(s, e, "");
}

std::string replace(const std::string & s, const std::string & from, const std::string & to) {
    std::string result = s;
    size_t pos = 0;
    while ((pos = result.find(from, pos)) != std::string::npos) {
        result.replace(pos, from.length(), to);
        pos += to.length();
    }
    return result;
}

bool read_wav(const std::string & fname, std::vector<float>& pcmf32, std::vector<std::vector<float>>& pcmf32s, bool stereo) {
    drwav wav;
    std::vector<uint8_t> wav_data; // used for pipe input from stdin

    if (fname == "-") {
        {
            uint8_t buf[1024];
            while (true)
            {
                const size_t n = fread(buf, 1, sizeof(buf), stdin);
                if (n == 0) {
                    break;
                }
                wav_data.insert(wav_data.end(), buf, buf + n);
            }
        }

        if (drwav_init_memory(&wav, wav_data.data(), wav_data.size(), nullptr) == false) {
            fprintf(stderr, "error: failed to open WAV file from stdin\n");
            return false;
        }

        fprintf(stderr, "%s: read %zu bytes from stdin\n", __func__, wav_data.size());
    }
    else if (drwav_init_file(&wav, fname.c_str(), nullptr) == false) {
        fprintf(stderr, "error: failed to open '%s' as WAV file\n", fname.c_str());
        return false;
    }

    if (wav.channels != 1 && wav.channels != 2) {
        fprintf(stderr, "%s: WAV file '%s' must be mono or stereo\n", __func__, fname.c_str());
        return false;
    }

    if (stereo && wav.channels != 2) {
        fprintf(stderr, "%s: WAV file '%s' must be stereo for diarization\n", __func__, fname.c_str());
        return false;
    }

    if (wav.sampleRate != COMMON_SAMPLE_RATE) {
        fprintf(stderr, "%s: WAV file '%s' must be %i kHz\n", __func__, fname.c_str(), COMMON_SAMPLE_RATE/1000);
        return false;
    }

    if (wav.bitsPerSample != 16) {
        fprintf(stderr, "%s: WAV file '%s' must be 16-bit\n", __func__, fname.c_str());
        return false;
    }

    const uint64_t n = wav_data.empty() ? wav.totalPCMFrameCount : wav_data.size()/(wav.channels*wav.bitsPerSample/8);

    std::vector<int16_t> pcm16;
    pcm16.resize(n*wav.channels);
    drwav_read_pcm_frames_s16(&wav, n, pcm16.data());
    drwav_uninit(&wav);

    // convert to mono, float
    pcmf32.resize(n);
    if (wav.channels == 1) {
        for (uint64_t i = 0; i < n; i++) {
            pcmf32[i] = float(pcm16[i])/32768.0f;
        }
    } else {
        for (uint64_t i = 0; i < n; i++) {
            pcmf32[i] = float(pcm16[2*i] + pcm16[2*i + 1])/65536.0f;
        }
    }

    if (stereo) {
        // convert to stereo, float
        pcmf32s.resize(2);

        pcmf32s[0].resize(n);
        pcmf32s[1].resize(n);
        for (uint64_t i = 0; i < n; i++) {
            pcmf32s[0][i] = float(pcm16[2*i])/32768.0f;
            pcmf32s[1][i] = float(pcm16[2*i + 1])/32768.0f;
        }
    }

    return true;
}

void high_pass_filter(std::vector<float> & data, float cutoff, float sample_rate) {
    const float rc = 1.0f / (2.0f * M_PI * cutoff);
    const float dt = 1.0f / sample_rate;
    const float alpha = dt / (rc + dt);

    float y = data[0];

    for (size_t i = 1; i < data.size(); i++) {
        y = alpha * (y + data[i] - data[i - 1]);
        data[i] = y;
    }
}

bool vad_simple(std::vector<float> & pcmf32, int sample_rate, int last_ms, float vad_thold, float freq_thold, bool verbose) {
    const int n_samples      = pcmf32.size();
    const int n_samples_last = (sample_rate * last_ms) / 1000;

    if (n_samples_last >= n_samples) {
        // not enough samples - assume no speech
        return false;
    }

    if (freq_thold > 0.0f) {
        high_pass_filter(pcmf32, freq_thold, sample_rate);
    }

    float energy_all  = 0.0f;
    float energy_last = 0.0f;

    for (int i = 0; i < n_samples; i++) {
        energy_all += fabsf(pcmf32[i]);

        if (i >= n_samples - n_samples_last) {
            energy_last += fabsf(pcmf32[i]);
        }
    }

    energy_all  /= n_samples;
    energy_last /= n_samples_last;

    if (verbose) {
        fprintf(stderr, "%s: energy_all: %f, energy_last: %f, vad_thold: %f, freq_thold: %f\n", __func__, energy_all, energy_last, vad_thold, freq_thold);
    }

    if (energy_last > vad_thold*energy_all) {
        return false;
    }

    return true;
}
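A worked illustration of the vad_simple() decision above, with made-up numbers rather than anything taken from the diff: the function compares the mean absolute amplitude of the trailing last_ms window against the mean over the whole buffer. With vad_thold = 0.6, a 2-second buffer whose overall mean is 0.050 and whose last 1000 ms average 0.020 gives 0.020 <= 0.6 * 0.050 = 0.030, so the function returns true; the most recent audio is quiet relative to the window as a whole, which the callers interpret as "speech has just ended, run the transcription now". If the last second still averaged 0.045, the comparison 0.045 > 0.030 would hold and the function returns false, so the caller keeps buffering. The optional freq_thold pass simply high-pass filters the buffer first so that low-frequency rumble does not inflate the energy estimates.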
examples/common.h (new file, 40 lines)
@@ -0,0 +1,40 @@
#pragma once

// needs to match WHISPER_SAMPLE_RATE
#define COMMON_SAMPLE_RATE 16000

#include <vector>
#include <string>

std::string trim(const std::string & s);

std::string replace(
        const std::string & s,
        const std::string & from,
        const std::string & to);

// Read WAV audio file and store the PCM data into pcmf32
// The sample rate of the audio must be equal to COMMON_SAMPLE_RATE
// If stereo flag is set and the audio has 2 channels, the pcmf32s will contain 2 channel PCM
bool read_wav(
        const std::string & fname,
        std::vector<float> & pcmf32,
        std::vector<std::vector<float>> & pcmf32s,
        bool stereo);

// Apply a high-pass frequency filter to PCM audio
// Suppresses frequencies below cutoff Hz
void high_pass_filter(
        std::vector<float> & data,
        float cutoff,
        float sample_rate);

// Basic voice activity detection (VAD) using audio energy adaptive threshold
bool vad_simple(
        std::vector<float> & pcmf32,
        int   sample_rate,
        int   last_ms,
        float vad_thold,
        float freq_thold,
        bool  verbose);
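And a small sketch of how an example might consume this header once it links against the new common library; the input path and the threshold values are placeholders, not values taken from the diff:

    #include "common.h"

    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<float> pcmf32;                 // mono float PCM, normalized by 32768
        std::vector<std::vector<float>> pcmf32s;   // per-channel PCM when stereo == true

        // passing "-" instead of a path would read a 16-bit, 16 kHz WAV from stdin
        if (!read_wav("samples/jfk.wav", pcmf32, pcmf32s, /*stereo =*/ false)) {
            return 1;
        }

        high_pass_filter(pcmf32, 100.0f, COMMON_SAMPLE_RATE);   // suppress rumble below ~100 Hz

        const bool finished = vad_simple(pcmf32, COMMON_SAMPLE_RATE, 1000, 0.6f, 0.0f, true);
        fprintf(stdout, "last second is %s\n", finished ? "silence" : "still speech");
        return 0;
    }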
@@ -8,7 +8,7 @@ function convertTypedArray(src, type) {

 var printTextarea = (function() {
     var element = document.getElementById('output');
-    if (element) element.alue = ''; // clear browser cache
+    if (element) element.value = ''; // clear browser cache

     return function(text) {
         if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
         console.log(text);
@@ -88,11 +88,15 @@ async function fetchRemote(url, cbProgress, cbPrint) {
 // - check if the data is already in the IndexedDB
 // - if not, fetch it from the remote URL and store it in the IndexedDB
 function loadRemote(url, dst, size_mb, cbProgress, cbReady, cbCancel, cbPrint) {
-    // query the storage quota and print it
-    navigator.storage.estimate().then(function (estimate) {
-        cbPrint('loadRemote: storage quota: ' + estimate.quota + ' bytes');
-        cbPrint('loadRemote: storage usage: ' + estimate.usage + ' bytes');
-    });
+    if (!navigator.storage || !navigator.storage.estimate) {
+        cbPrint('loadRemote: navigator.storage.estimate() is not supported');
+    } else {
+        // query the storage quota and print it
+        navigator.storage.estimate().then(function (estimate) {
+            cbPrint('loadRemote: storage quota: ' + estimate.quota + ' bytes');
+            cbPrint('loadRemote: storage usage: ' + estimate.usage + ' bytes');
+        });
+    }

     // check if the data is already in the IndexedDB
     var rq = indexedDB.open(dbName, dbVersion);
@@ -100,7 +100,7 @@ while [ $running -eq 1 ]; do
     err=$(cat /tmp/whisper-live.err | wc -l)
 done

-./main -t 8 -m ./models/ggml-base.en.bin -f /tmp/whisper-live.wav --no-timestamps -otxt 2> /tmp/whispererr | tail -n 1
+./main -t 8 -m ./models/ggml-${model}.bin -f /tmp/whisper-live.wav --no-timestamps -otxt 2> /tmp/whispererr | tail -n 1

 while [ $SECONDS -lt $((($i+1)*$step_s)) ]; do
     sleep 1
@@ -3,4 +3,4 @@ add_executable(${TARGET} main.cpp)

 include(DefaultTargetOptions)

-target_link_libraries(${TARGET} PRIVATE whisper ${CMAKE_THREAD_LIBS_INIT})
+target_link_libraries(${TARGET} PRIVATE common whisper ${CMAKE_THREAD_LIBS_INIT})
@ -9,25 +9,35 @@ It can be used as a reference for using the `whisper.cpp` library in other proje
|
|||||||
usage: ./main [options] file0.wav file1.wav ...
|
usage: ./main [options] file0.wav file1.wav ...
|
||||||
|
|
||||||
options:
|
options:
|
||||||
-h, --help [default] show this help message and exit
|
-h, --help [default] show this help message and exit
|
||||||
-t N, --threads N [4 ] number of threads to use during computation
|
-t N, --threads N [4 ] number of threads to use during computation
|
||||||
-p N, --processors N [1 ] number of processors to use during computation
|
-p N, --processors N [1 ] number of processors to use during computation
|
||||||
-ot N, --offset-t N [0 ] time offset in milliseconds
|
-ot N, --offset-t N [0 ] time offset in milliseconds
|
||||||
-on N, --offset-n N [0 ] segment index offset
|
-on N, --offset-n N [0 ] segment index offset
|
||||||
-d N, --duration N [0 ] duration of audio to process in milliseconds
|
-d N, --duration N [0 ] duration of audio to process in milliseconds
|
||||||
-mc N, --max-context N [-1 ] maximum number of text context tokens to store
|
-mc N, --max-context N [-1 ] maximum number of text context tokens to store
|
||||||
-ml N, --max-len N [0 ] maximum segment length in characters
|
-ml N, --max-len N [0 ] maximum segment length in characters
|
||||||
-wt N, --word-thold N [0.01 ] word timestamp probability threshold
|
-bo N, --best-of N [5 ] number of best candidates to keep
|
||||||
-su, --speed-up [false ] speed up audio by x2 (reduced accuracy)
|
-bs N, --beam-size N [-1 ] beam size for beam search
|
||||||
-tr, --translate [false ] translate from source language to english
|
-wt N, --word-thold N [0.01 ] word timestamp probability threshold
|
||||||
-otxt, --output-txt [false ] output result in a text file
|
-et N, --entropy-thold N [2.40 ] entropy threshold for decoder fail
|
||||||
-ovtt, --output-vtt [false ] output result in a vtt file
|
-lpt N, --logprob-thold N [-1.00 ] log probability threshold for decoder fail
|
||||||
-osrt, --output-srt [false ] output result in a srt file
|
-su, --speed-up [false ] speed up audio by x2 (reduced accuracy)
|
||||||
-owts, --output-words [false ] output script for generating karaoke video
|
-tr, --translate [false ] translate from source language to english
|
||||||
-ps, --print-special [false ] print special tokens
|
-di, --diarize [false ] stereo audio diarization
|
||||||
-pc, --print-colors [false ] print colors
|
-nf, --no-fallback [false ] do not use temperature fallback while decoding
|
||||||
-nt, --no-timestamps [true ] do not print timestamps
|
-otxt, --output-txt [false ] output result in a text file
|
||||||
-l LANG, --language LANG [en ] spoken language
|
-ovtt, --output-vtt [false ] output result in a vtt file
|
||||||
-m FNAME, --model FNAME [models/ggml-base.en.bin] model path
|
-osrt, --output-srt [false ] output result in a srt file
|
||||||
-f FNAME, --file FNAME [ ] input WAV file path
|
-owts, --output-words [false ] output script for generating karaoke video
|
||||||
|
-ocsv, --output-csv [false ] output result in a CSV file
|
||||||
|
-of FNAME, --output-file FNAME [ ] output file path (without file extension)
|
||||||
|
-ps, --print-special [false ] print special tokens
|
||||||
|
-pc, --print-colors [false ] print colors
|
||||||
|
-pp, --print-progress [false ] print progress
|
||||||
|
-nt, --no-timestamps [true ] do not print timestamps
|
||||||
|
-l LANG, --language LANG [en ] spoken language ('auto' for auto-detect)
|
||||||
|
--prompt PROMPT [ ] initial prompt
|
||||||
|
-m FNAME, --model FNAME [models/ggml-base.en.bin] model path
|
||||||
|
-f FNAME, --file FNAME [ ] input WAV file path
|
||||||
```
|
```
|
||||||
|
@ -1,9 +1,6 @@
|
|||||||
#include "whisper.h"
|
#include "common.h"
|
||||||
|
|
||||||
// third-party utilities
|
#include "whisper.h"
|
||||||
// use your favorite implementations
|
|
||||||
#define DR_WAV_IMPLEMENTATION
|
|
||||||
#include "dr_wav.h"
|
|
||||||
|
|
||||||
#include <cmath>
|
#include <cmath>
|
||||||
#include <fstream>
|
#include <fstream>
|
||||||
@ -53,22 +50,24 @@ void replace_all(std::string & s, const std::string & search, const std::string
|
|||||||
// command-line parameters
|
// command-line parameters
|
||||||
struct whisper_params {
|
struct whisper_params {
|
||||||
int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
|
int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
|
||||||
int32_t n_processors = 1;
|
int32_t n_processors = 1;
|
||||||
int32_t offset_t_ms = 0;
|
int32_t offset_t_ms = 0;
|
||||||
int32_t offset_n = 0;
|
int32_t offset_n = 0;
|
||||||
int32_t duration_ms = 0;
|
int32_t duration_ms = 0;
|
||||||
int32_t max_context = -1;
|
int32_t max_context = -1;
|
||||||
int32_t max_len = 0;
|
int32_t max_len = 0;
|
||||||
int32_t best_of = 5;
|
int32_t best_of = 5;
|
||||||
int32_t beam_size = -1;
|
int32_t beam_size = -1;
|
||||||
|
|
||||||
float word_thold = 0.01f;
|
float word_thold = 0.01f;
|
||||||
float entropy_thold = 2.4f;
|
float entropy_thold = 2.40f;
|
||||||
float logprob_thold = -1.0f;
|
float logprob_thold = -1.00f;
|
||||||
|
|
||||||
bool speed_up = false;
|
bool speed_up = false;
|
||||||
bool translate = false;
|
bool translate = false;
|
||||||
bool diarize = false;
|
bool diarize = false;
|
||||||
|
bool split_on_word = false;
|
||||||
|
bool no_fallback = false;
|
||||||
bool output_txt = false;
|
bool output_txt = false;
|
||||||
bool output_vtt = false;
|
bool output_vtt = false;
|
||||||
bool output_srt = false;
|
bool output_srt = false;
|
||||||
@ -81,10 +80,11 @@ struct whisper_params {
|
|||||||
|
|
||||||
std::string language = "en";
|
std::string language = "en";
|
||||||
std::string prompt;
|
std::string prompt;
|
||||||
|
std::string font_path = "/System/Library/Fonts/Supplemental/Courier New Bold.ttf";
|
||||||
std::string model = "models/ggml-base.en.bin";
|
std::string model = "models/ggml-base.en.bin";
|
||||||
|
|
||||||
std::vector<std::string> fname_inp = {};
|
std::vector<std::string> fname_inp = {};
|
||||||
std::vector<std::string> fname_outp = {};
|
std::vector<std::string> fname_out = {};
|
||||||
};
|
};
|
||||||
|
|
||||||
void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
|
void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
|
||||||
@ -93,6 +93,11 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
|
|||||||
for (int i = 1; i < argc; i++) {
|
for (int i = 1; i < argc; i++) {
|
||||||
std::string arg = argv[i];
|
std::string arg = argv[i];
|
||||||
|
|
||||||
|
if (arg == "-"){
|
||||||
|
params.fname_inp.push_back(arg);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
if (arg[0] != '-') {
|
if (arg[0] != '-') {
|
||||||
params.fname_inp.push_back(arg);
|
params.fname_inp.push_back(arg);
|
||||||
continue;
|
continue;
|
||||||
@ -117,12 +122,15 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
|
|||||||
else if (arg == "-su" || arg == "--speed-up") { params.speed_up = true; }
|
else if (arg == "-su" || arg == "--speed-up") { params.speed_up = true; }
|
||||||
else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
|
else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
|
||||||
else if (arg == "-di" || arg == "--diarize") { params.diarize = true; }
|
else if (arg == "-di" || arg == "--diarize") { params.diarize = true; }
|
||||||
|
else if (arg == "-sow" || arg == "--split-on-word") { params.split_on_word = true; }
|
||||||
|
else if (arg == "-nf" || arg == "--no-fallback") { params.no_fallback = true; }
|
||||||
else if (arg == "-otxt" || arg == "--output-txt") { params.output_txt = true; }
|
else if (arg == "-otxt" || arg == "--output-txt") { params.output_txt = true; }
|
||||||
else if (arg == "-ovtt" || arg == "--output-vtt") { params.output_vtt = true; }
|
else if (arg == "-ovtt" || arg == "--output-vtt") { params.output_vtt = true; }
|
||||||
else if (arg == "-osrt" || arg == "--output-srt") { params.output_srt = true; }
|
else if (arg == "-osrt" || arg == "--output-srt") { params.output_srt = true; }
|
||||||
else if (arg == "-owts" || arg == "--output-words") { params.output_wts = true; }
|
else if (arg == "-owts" || arg == "--output-words") { params.output_wts = true; }
|
||||||
|
else if (arg == "-fp" || arg == "--font-path") { params.font_path = argv[++i]; }
|
||||||
else if (arg == "-ocsv" || arg == "--output-csv") { params.output_csv = true; }
|
else if (arg == "-ocsv" || arg == "--output-csv") { params.output_csv = true; }
|
||||||
else if (arg == "-of" || arg == "--output-file") { params.fname_outp.emplace_back(argv[++i]); }
|
else if (arg == "-of" || arg == "--output-file") { params.fname_out.emplace_back(argv[++i]); }
|
||||||
else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; }
|
else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; }
|
||||||
else if (arg == "-pc" || arg == "--print-colors") { params.print_colors = true; }
|
else if (arg == "-pc" || arg == "--print-colors") { params.print_colors = true; }
|
||||||
else if (arg == "-pp" || arg == "--print-progress") { params.print_progress = true; }
|
else if (arg == "-pp" || arg == "--print-progress") { params.print_progress = true; }
|
||||||
@ -154,6 +162,7 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
|
|||||||
fprintf(stderr, " -d N, --duration N [%-7d] duration of audio to process in milliseconds\n", params.duration_ms);
|
fprintf(stderr, " -d N, --duration N [%-7d] duration of audio to process in milliseconds\n", params.duration_ms);
|
||||||
fprintf(stderr, " -mc N, --max-context N [%-7d] maximum number of text context tokens to store\n", params.max_context);
|
fprintf(stderr, " -mc N, --max-context N [%-7d] maximum number of text context tokens to store\n", params.max_context);
|
||||||
fprintf(stderr, " -ml N, --max-len N [%-7d] maximum segment length in characters\n", params.max_len);
|
fprintf(stderr, " -ml N, --max-len N [%-7d] maximum segment length in characters\n", params.max_len);
|
||||||
|
fprintf(stderr, " -sow, --split-on-word [%-7s] split on word rather than on token\n", params.split_on_word ? "true" : "false");
|
||||||
fprintf(stderr, " -bo N, --best-of N [%-7d] number of best candidates to keep\n", params.best_of);
|
fprintf(stderr, " -bo N, --best-of N [%-7d] number of best candidates to keep\n", params.best_of);
|
||||||
fprintf(stderr, " -bs N, --beam-size N [%-7d] beam size for beam search\n", params.beam_size);
|
fprintf(stderr, " -bs N, --beam-size N [%-7d] beam size for beam search\n", params.beam_size);
|
||||||
fprintf(stderr, " -wt N, --word-thold N [%-7.2f] word timestamp probability threshold\n", params.word_thold);
|
fprintf(stderr, " -wt N, --word-thold N [%-7.2f] word timestamp probability threshold\n", params.word_thold);
|
||||||
@ -162,10 +171,12 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
|
|||||||
fprintf(stderr, " -su, --speed-up [%-7s] speed up audio by x2 (reduced accuracy)\n", params.speed_up ? "true" : "false");
|
fprintf(stderr, " -su, --speed-up [%-7s] speed up audio by x2 (reduced accuracy)\n", params.speed_up ? "true" : "false");
|
||||||
fprintf(stderr, " -tr, --translate [%-7s] translate from source language to english\n", params.translate ? "true" : "false");
|
fprintf(stderr, " -tr, --translate [%-7s] translate from source language to english\n", params.translate ? "true" : "false");
|
||||||
fprintf(stderr, " -di, --diarize [%-7s] stereo audio diarization\n", params.diarize ? "true" : "false");
|
fprintf(stderr, " -di, --diarize [%-7s] stereo audio diarization\n", params.diarize ? "true" : "false");
|
||||||
|
fprintf(stderr, " -nf, --no-fallback [%-7s] do not use temperature fallback while decoding\n", params.no_fallback ? "true" : "false");
|
||||||
fprintf(stderr, " -otxt, --output-txt [%-7s] output result in a text file\n", params.output_txt ? "true" : "false");
|
fprintf(stderr, " -otxt, --output-txt [%-7s] output result in a text file\n", params.output_txt ? "true" : "false");
|
||||||
fprintf(stderr, " -ovtt, --output-vtt [%-7s] output result in a vtt file\n", params.output_vtt ? "true" : "false");
|
fprintf(stderr, " -ovtt, --output-vtt [%-7s] output result in a vtt file\n", params.output_vtt ? "true" : "false");
|
||||||
fprintf(stderr, " -osrt, --output-srt [%-7s] output result in a srt file\n", params.output_srt ? "true" : "false");
|
fprintf(stderr, " -osrt, --output-srt [%-7s] output result in a srt file\n", params.output_srt ? "true" : "false");
|
||||||
fprintf(stderr, " -owts, --output-words [%-7s] output script for generating karaoke video\n", params.output_wts ? "true" : "false");
|
fprintf(stderr, " -owts, --output-words [%-7s] output script for generating karaoke video\n", params.output_wts ? "true" : "false");
|
||||||
|
fprintf(stderr, " -fp, --font-path [%-7s] path to a monospace font for karaoke video\n", params.font_path.c_str());
|
||||||
fprintf(stderr, " -ocsv, --output-csv [%-7s] output result in a CSV file\n", params.output_csv ? "true" : "false");
|
fprintf(stderr, " -ocsv, --output-csv [%-7s] output result in a CSV file\n", params.output_csv ? "true" : "false");
|
||||||
fprintf(stderr, " -of FNAME, --output-file FNAME [%-7s] output file path (without file extension)\n", "");
|
fprintf(stderr, " -of FNAME, --output-file FNAME [%-7s] output file path (without file extension)\n", "");
|
||||||
fprintf(stderr, " -ps, --print-special [%-7s] print special tokens\n", params.print_special ? "true" : "false");
|
fprintf(stderr, " -ps, --print-special [%-7s] print special tokens\n", params.print_special ? "true" : "false");
|
||||||
@ -185,7 +196,7 @@ struct whisper_print_user_data {
|
|||||||
const std::vector<std::vector<float>> * pcmf32s;
|
const std::vector<std::vector<float>> * pcmf32s;
|
||||||
};
|
};
|
||||||
|
|
||||||
void whisper_print_segment_callback(struct whisper_context * ctx, int n_new, void * user_data) {
|
void whisper_print_segment_callback(struct whisper_context * ctx, struct whisper_state * /*state*/, int n_new, void * user_data) {
|
||||||
const auto & params = *((whisper_print_user_data *) user_data)->params;
|
const auto & params = *((whisper_print_user_data *) user_data)->params;
|
||||||
const auto & pcmf32s = *((whisper_print_user_data *) user_data)->pcmf32s;
|
const auto & pcmf32s = *((whisper_print_user_data *) user_data)->pcmf32s;
|
||||||
|
|
||||||
@ -344,16 +355,14 @@ bool output_csv(struct whisper_context * ctx, const char * fname) {
|
|||||||
fprintf(stderr, "%s: saving output to '%s'\n", __func__, fname);
|
fprintf(stderr, "%s: saving output to '%s'\n", __func__, fname);
|
||||||
|
|
||||||
const int n_segments = whisper_full_n_segments(ctx);
|
const int n_segments = whisper_full_n_segments(ctx);
|
||||||
|
fout << "start,end,text\n";
|
||||||
for (int i = 0; i < n_segments; ++i) {
|
for (int i = 0; i < n_segments; ++i) {
|
||||||
const char * text = whisper_full_get_segment_text(ctx, i);
|
const char * text = whisper_full_get_segment_text(ctx, i);
|
||||||
if (text[0] == ' ') {
|
|
||||||
text = text + sizeof(char); //whisper_full_get_segment_text() returns a string with leading space, point to the next character.
|
|
||||||
}
|
|
||||||
const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
|
const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
|
||||||
const int64_t t1 = whisper_full_get_segment_t1(ctx, i);
|
const int64_t t1 = whisper_full_get_segment_t1(ctx, i);
|
||||||
|
|
||||||
//need to multiply times returned from whisper_full_get_segment_t{0,1}() by 10 to get milliseconds.
|
//need to multiply times returned from whisper_full_get_segment_t{0,1}() by 10 to get milliseconds.
|
||||||
fout << 10 * t0 << ", " << 10 * t1 << ", \"" << text << "\"\n";
|
fout << 10 * t0 << "," << 10 * t1 << ",\"" << text << "\"\n";
|
||||||
}
|
}
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
@ -362,13 +371,18 @@ bool output_csv(struct whisper_context * ctx, const char * fname) {
|
|||||||
// karaoke video generation
|
// karaoke video generation
|
||||||
// outputs a bash script that uses ffmpeg to generate a video with the subtitles
|
// outputs a bash script that uses ffmpeg to generate a video with the subtitles
|
||||||
// TODO: font parameter adjustments
|
// TODO: font parameter adjustments
|
||||||
bool output_wts(struct whisper_context * ctx, const char * fname, const char * fname_inp, const whisper_params & /*params*/, float t_sec) {
|
bool output_wts(struct whisper_context * ctx, const char * fname, const char * fname_inp, const whisper_params & params, float t_sec) {
|
||||||
std::ofstream fout(fname);
|
std::ofstream fout(fname);
|
||||||
|
|
||||||
fprintf(stderr, "%s: saving output to '%s'\n", __func__, fname);
|
fprintf(stderr, "%s: saving output to '%s'\n", __func__, fname);
|
||||||
|
|
||||||
// TODO: become parameter
|
static const char * font = params.font_path.c_str();
|
||||||
static const char * font = "/System/Library/Fonts/Supplemental/Courier New Bold.ttf";
|
|
||||||
|
std::ifstream fin(font);
|
||||||
|
if (!fin.is_open()) {
|
||||||
|
fprintf(stderr, "%s: font not found at '%s', please specify a monospace font with -fp\n", __func__, font);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
fout << "#!/bin/bash" << "\n";
|
fout << "#!/bin/bash" << "\n";
|
||||||
fout << "\n";
|
fout << "\n";
|
||||||
@ -517,91 +531,14 @@ int main(int argc, char ** argv) {
|
|||||||
|
|
||||||
for (int f = 0; f < (int) params.fname_inp.size(); ++f) {
|
for (int f = 0; f < (int) params.fname_inp.size(); ++f) {
|
||||||
const auto fname_inp = params.fname_inp[f];
|
const auto fname_inp = params.fname_inp[f];
|
||||||
const auto fname_outp = f < params.fname_outp.size() && !params.fname_outp[f].empty() ? params.fname_outp[f] : params.fname_inp[f];
|
const auto fname_out = f < (int) params.fname_out.size() && !params.fname_out[f].empty() ? params.fname_out[f] : params.fname_inp[f];
|
||||||
|
|
||||||
std::vector<float> pcmf32; // mono-channel F32 PCM
|
std::vector<float> pcmf32; // mono-channel F32 PCM
|
||||||
std::vector<std::vector<float>> pcmf32s; // stereo-channel F32 PCM
|
std::vector<std::vector<float>> pcmf32s; // stereo-channel F32 PCM
|
||||||
|
|
||||||
// WAV input
|
if (!::read_wav(fname_inp, pcmf32, pcmf32s, params.diarize)) {
|
||||||
{
|
fprintf(stderr, "error: failed to read WAV file '%s'\n", fname_inp.c_str());
|
||||||
drwav wav;
|
continue;
|
||||||
std::vector<uint8_t> wav_data; // used for pipe input from stdin
|
|
||||||
|
|
||||||
if (fname_inp == "-") {
|
|
||||||
{
|
|
||||||
uint8_t buf[1024];
|
|
||||||
while (true)
|
|
||||||
{
|
|
||||||
const size_t n = fread(buf, 1, sizeof(buf), stdin);
|
|
||||||
if (n == 0) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
wav_data.insert(wav_data.end(), buf, buf + n);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (drwav_init_memory(&wav, wav_data.data(), wav_data.size(), nullptr) == false) {
|
|
||||||
fprintf(stderr, "error: failed to open WAV file from stdin\n");
|
|
||||||
return 4;
|
|
||||||
}
|
|
||||||
|
|
||||||
fprintf(stderr, "%s: read %zu bytes from stdin\n", __func__, wav_data.size());
|
|
||||||
}
|
|
||||||
else if (drwav_init_file(&wav, fname_inp.c_str(), nullptr) == false) {
|
|
||||||
fprintf(stderr, "error: failed to open '%s' as WAV file\n", fname_inp.c_str());
|
|
||||||
return 5;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (wav.channels != 1 && wav.channels != 2) {
|
|
||||||
fprintf(stderr, "%s: WAV file '%s' must be mono or stereo\n", argv[0], fname_inp.c_str());
|
|
||||||
return 6;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (params.diarize && wav.channels != 2 && params.no_timestamps == false) {
|
|
||||||
fprintf(stderr, "%s: WAV file '%s' must be stereo for diarization and timestamps have to be enabled\n", argv[0], fname_inp.c_str());
|
|
||||||
return 6;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (wav.sampleRate != WHISPER_SAMPLE_RATE) {
|
|
||||||
fprintf(stderr, "%s: WAV file '%s' must be %i kHz\n", argv[0], fname_inp.c_str(), WHISPER_SAMPLE_RATE/1000);
|
|
||||||
return 8;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (wav.bitsPerSample != 16) {
|
|
||||||
fprintf(stderr, "%s: WAV file '%s' must be 16-bit\n", argv[0], fname_inp.c_str());
|
|
||||||
return 9;
|
|
||||||
}
|
|
||||||
|
|
||||||
const uint64_t n = wav_data.empty() ? wav.totalPCMFrameCount : wav_data.size()/(wav.channels*wav.bitsPerSample/8);
|
|
||||||
|
|
||||||
std::vector<int16_t> pcm16;
|
|
||||||
pcm16.resize(n*wav.channels);
|
|
||||||
drwav_read_pcm_frames_s16(&wav, n, pcm16.data());
|
|
||||||
drwav_uninit(&wav);
|
|
||||||
|
|
||||||
// convert to mono, float
|
|
||||||
pcmf32.resize(n);
|
|
||||||
if (wav.channels == 1) {
|
|
||||||
for (uint64_t i = 0; i < n; i++) {
|
|
||||||
pcmf32[i] = float(pcm16[i])/32768.0f;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
for (uint64_t i = 0; i < n; i++) {
|
|
||||||
pcmf32[i] = float(pcm16[2*i] + pcm16[2*i + 1])/65536.0f;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (params.diarize) {
|
|
||||||
// convert to stereo, float
|
|
||||||
pcmf32s.resize(2);
|
|
||||||
|
|
||||||
pcmf32s[0].resize(n);
|
|
||||||
pcmf32s[1].resize(n);
|
|
||||||
for (uint64_t i = 0; i < n; i++) {
|
|
||||||
pcmf32s[0][i] = float(pcm16[2*i])/32768.0f;
|
|
||||||
pcmf32s[1][i] = float(pcm16[2*i + 1])/32768.0f;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// print system information
|
// print system information
|
||||||
@@ -650,17 +587,20 @@ int main(int argc, char ** argv) {
            wparams.token_timestamps = params.output_wts || params.max_len > 0;
            wparams.thold_pt         = params.word_thold;
-           wparams.entropy_thold    = params.entropy_thold;
-           wparams.logprob_thold    = params.logprob_thold;
            wparams.max_len          = params.output_wts && params.max_len == 0 ? 60 : params.max_len;
+           wparams.split_on_word    = params.split_on_word;

            wparams.speed_up         = params.speed_up;

+           wparams.prompt_tokens    = prompt_tokens.empty() ? nullptr : prompt_tokens.data();
+           wparams.prompt_n_tokens  = prompt_tokens.empty() ? 0 : prompt_tokens.size();

            wparams.greedy.best_of        = params.best_of;
            wparams.beam_search.beam_size = params.beam_size;

-           wparams.prompt_tokens    = prompt_tokens.empty() ? nullptr : prompt_tokens.data();
-           wparams.prompt_n_tokens  = prompt_tokens.empty() ? 0 : prompt_tokens.size();
+           wparams.temperature_inc  = params.no_fallback ? 0.0f : wparams.temperature_inc;
+           wparams.entropy_thold    = params.entropy_thold;
+           wparams.logprob_thold    = params.logprob_thold;

            whisper_print_user_data user_data = { &params, &pcmf32s };

@@ -676,7 +616,7 @@ int main(int argc, char ** argv) {
            {
                static bool is_aborted = false; // NOTE: this should be atomic to avoid data race

-               wparams.encoder_begin_callback = [](struct whisper_context * /*ctx*/, void * user_data) {
+               wparams.encoder_begin_callback = [](struct whisper_context * /*ctx*/, struct whisper_state * /*state*/, void * user_data) {
                    bool is_aborted = *(bool*)user_data;
                    return !is_aborted;
                };
@@ -695,34 +635,33 @@ int main(int argc, char ** argv) {

            // output to text file
            if (params.output_txt) {
-               const auto fname_txt = fname_outp + ".txt";
+               const auto fname_txt = fname_out + ".txt";
                output_txt(ctx, fname_txt.c_str());
            }

            // output to VTT file
            if (params.output_vtt) {
-               const auto fname_vtt = fname_outp + ".vtt";
+               const auto fname_vtt = fname_out + ".vtt";
                output_vtt(ctx, fname_vtt.c_str());
            }

            // output to SRT file
            if (params.output_srt) {
-               const auto fname_srt = fname_outp + ".srt";
+               const auto fname_srt = fname_out + ".srt";
                output_srt(ctx, fname_srt.c_str(), params);
            }

            // output to WTS file
            if (params.output_wts) {
-               const auto fname_wts = fname_outp + ".wts";
+               const auto fname_wts = fname_out + ".wts";
                output_wts(ctx, fname_wts.c_str(), fname_inp.c_str(), params, float(pcmf32.size() + 1000)/WHISPER_SAMPLE_RATE);
            }

            // output to CSV file
            if (params.output_csv) {
-               const auto fname_csv = fname_outp + ".csv";
+               const auto fname_csv = fname_out + ".csv";
                output_csv(ctx, fname_csv.c_str());
            }

        }
    }

@@ -5,6 +5,5 @@ if (WHISPER_SUPPORT_SDL2)

    include(DefaultTargetOptions)

-   target_include_directories(${TARGET} PRIVATE ${SDL2_INCLUDE_DIRS})
-   target_link_libraries(${TARGET} PRIVATE whisper ${SDL2_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
+   target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${CMAKE_THREAD_LIBS_INIT})
endif ()

@@ -3,19 +3,16 @@
// A very quick-n-dirty implementation serving mainly as a proof of concept.
//

+#include "common.h"
+#include "common-sdl.h"
#include "whisper.h"

-#include <SDL.h>
-#include <SDL_audio.h>
-
-#include <atomic>
#include <cassert>
#include <cstdio>
#include <string>
#include <thread>
#include <vector>
#include <fstream>
-#include <mutex>

// 500 -> 00:05.000
// 6000 -> 01:00.000
@@ -116,306 +113,6 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & params) {
    fprintf(stderr, "\n");
}

-//
-// SDL Audio capture
-//
-
-class audio_async {
-public:
-    audio_async(int len_ms);
-    ~audio_async();
-
-    bool init(int capture_id, int sample_rate);
-
-    // start capturing audio via the provided SDL callback
-    // keep last len_ms seconds of audio in a circular buffer
-    bool resume();
-    bool pause();
-    bool clear();
-
-    // callback to be called by SDL
-    void callback(uint8_t * stream, int len);
-
-    // get audio data from the circular buffer
-    void get(int ms, std::vector<float> & audio);
-
-private:
-    SDL_AudioDeviceID m_dev_id_in = 0;
-
-    int m_len_ms = 0;
-    int m_sample_rate = 0;
-
-    std::atomic_bool m_running;
-    std::mutex m_mutex;
-
-    std::vector<float> m_audio;
-    std::vector<float> m_audio_new;
-    size_t m_audio_pos = 0;
-    size_t m_audio_len = 0;
-};
-
-audio_async::audio_async(int len_ms) {
-    m_len_ms = len_ms;
-
-    m_running = false;
-}
-
-audio_async::~audio_async() {
-    if (m_dev_id_in) {
-        SDL_CloseAudioDevice(m_dev_id_in);
-    }
-}
-
-bool audio_async::init(int capture_id, int sample_rate) {
-    SDL_LogSetPriority(SDL_LOG_CATEGORY_APPLICATION, SDL_LOG_PRIORITY_INFO);
-
-    if (SDL_Init(SDL_INIT_AUDIO) < 0) {
-        SDL_LogError(SDL_LOG_CATEGORY_APPLICATION, "Couldn't initialize SDL: %s\n", SDL_GetError());
-        return false;
-    }
-
-    SDL_SetHintWithPriority(SDL_HINT_AUDIO_RESAMPLING_MODE, "medium", SDL_HINT_OVERRIDE);
-
-    {
-        int nDevices = SDL_GetNumAudioDevices(SDL_TRUE);
-        fprintf(stderr, "%s: found %d capture devices:\n", __func__, nDevices);
-        for (int i = 0; i < nDevices; i++) {
-            fprintf(stderr, "%s: - Capture device #%d: '%s'\n", __func__, i, SDL_GetAudioDeviceName(i, SDL_TRUE));
-        }
-    }
-
-    SDL_AudioSpec capture_spec_requested;
-    SDL_AudioSpec capture_spec_obtained;
-
-    SDL_zero(capture_spec_requested);
-    SDL_zero(capture_spec_obtained);
-
-    capture_spec_requested.freq = sample_rate;
-    capture_spec_requested.format = AUDIO_F32;
-    capture_spec_requested.channels = 1;
-    capture_spec_requested.samples = 1024;
-    capture_spec_requested.callback = [](void * userdata, uint8_t * stream, int len) {
-        audio_async * audio = (audio_async *) userdata;
-        audio->callback(stream, len);
-    };
-    capture_spec_requested.userdata = this;
-
-    if (capture_id >= 0) {
-        fprintf(stderr, "%s: attempt to open capture device %d : '%s' ...\n", __func__, capture_id, SDL_GetAudioDeviceName(capture_id, SDL_TRUE));
-        m_dev_id_in = SDL_OpenAudioDevice(SDL_GetAudioDeviceName(capture_id, SDL_TRUE), SDL_TRUE, &capture_spec_requested, &capture_spec_obtained, 0);
-    } else {
-        fprintf(stderr, "%s: attempt to open default capture device ...\n", __func__);
-        m_dev_id_in = SDL_OpenAudioDevice(nullptr, SDL_TRUE, &capture_spec_requested, &capture_spec_obtained, 0);
-    }
-
-    if (!m_dev_id_in) {
-        fprintf(stderr, "%s: couldn't open an audio device for capture: %s!\n", __func__, SDL_GetError());
-        m_dev_id_in = 0;
-
-        return false;
-    } else {
-        fprintf(stderr, "%s: obtained spec for input device (SDL Id = %d):\n", __func__, m_dev_id_in);
-        fprintf(stderr, "%s: - sample rate: %d\n", __func__, capture_spec_obtained.freq);
-        fprintf(stderr, "%s: - format: %d (required: %d)\n", __func__, capture_spec_obtained.format,
-                capture_spec_requested.format);
-        fprintf(stderr, "%s: - channels: %d (required: %d)\n", __func__, capture_spec_obtained.channels,
-                capture_spec_requested.channels);
-        fprintf(stderr, "%s: - samples per frame: %d\n", __func__, capture_spec_obtained.samples);
-    }
-
-    m_sample_rate = capture_spec_obtained.freq;
-
-    m_audio.resize((m_sample_rate*m_len_ms)/1000);
-
-    return true;
-}
-
-bool audio_async::resume() {
-    if (!m_dev_id_in) {
-        fprintf(stderr, "%s: no audio device to resume!\n", __func__);
-        return false;
-    }
-
-    if (m_running) {
-        fprintf(stderr, "%s: already running!\n", __func__);
-        return false;
-    }
-
-    SDL_PauseAudioDevice(m_dev_id_in, 0);
-
-    m_running = true;
-
-    return true;
-}
-
-bool audio_async::pause() {
-    if (!m_dev_id_in) {
-        fprintf(stderr, "%s: no audio device to pause!\n", __func__);
-        return false;
-    }
-
-    if (!m_running) {
-        fprintf(stderr, "%s: already paused!\n", __func__);
-        return false;
-    }
-
-    SDL_PauseAudioDevice(m_dev_id_in, 1);
-
-    m_running = false;
-
-    return true;
-}
-
-bool audio_async::clear() {
-    if (!m_dev_id_in) {
-        fprintf(stderr, "%s: no audio device to clear!\n", __func__);
-        return false;
-    }
-
-    if (!m_running) {
-        fprintf(stderr, "%s: not running!\n", __func__);
-        return false;
-    }
-
-    {
-        std::lock_guard<std::mutex> lock(m_mutex);
-
-        m_audio_pos = 0;
-        m_audio_len = 0;
-    }
-
-    return true;
-}
-
-// callback to be called by SDL
-void audio_async::callback(uint8_t * stream, int len) {
-    if (!m_running) {
-        return;
-    }
-
-    const size_t n_samples = len / sizeof(float);
-
-    m_audio_new.resize(n_samples);
-    memcpy(m_audio_new.data(), stream, n_samples * sizeof(float));
-
-    //fprintf(stderr, "%s: %zu samples, pos %zu, len %zu\n", __func__, n_samples, m_audio_pos, m_audio_len);
-
-    {
-        std::lock_guard<std::mutex> lock(m_mutex);
-
-        if (m_audio_pos + n_samples > m_audio.size()) {
-            const size_t n0 = m_audio.size() - m_audio_pos;
-
-            memcpy(&m_audio[m_audio_pos], stream, n0 * sizeof(float));
-            memcpy(&m_audio[0], &stream[n0], (n_samples - n0) * sizeof(float));
-
-            m_audio_pos = (m_audio_pos + n_samples) % m_audio.size();
-            m_audio_len = m_audio.size();
-        } else {
-            memcpy(&m_audio[m_audio_pos], stream, n_samples * sizeof(float));
-
-            m_audio_pos = (m_audio_pos + n_samples) % m_audio.size();
-            m_audio_len = std::min(m_audio_len + n_samples, m_audio.size());
-        }
-    }
-}
-
-void audio_async::get(int ms, std::vector<float> & result) {
-    if (!m_dev_id_in) {
-        fprintf(stderr, "%s: no audio device to get audio from!\n", __func__);
-        return;
-    }
-
-    if (!m_running) {
-        fprintf(stderr, "%s: not running!\n", __func__);
-        return;
-    }
-
-    result.clear();
-
-    {
-        std::lock_guard<std::mutex> lock(m_mutex);
-
-        if (ms <= 0) {
-            ms = m_len_ms;
-        }
-
-        size_t n_samples = (m_sample_rate * ms) / 1000;
-        if (n_samples > m_audio_len) {
-            n_samples = m_audio_len;
-        }
-
-        result.resize(n_samples);
-
-        int s0 = m_audio_pos - n_samples;
-        if (s0 < 0) {
-            s0 += m_audio.size();
-        }
-
-        if (s0 + n_samples > m_audio.size()) {
-            const size_t n0 = m_audio.size() - s0;
-
-            memcpy(result.data(), &m_audio[s0], n0 * sizeof(float));
-            memcpy(&result[n0], &m_audio[0], (n_samples - n0) * sizeof(float));
-        } else {
-            memcpy(result.data(), &m_audio[s0], n_samples * sizeof(float));
-        }
-    }
-}
-
-///////////////////////////
-
-void high_pass_filter(std::vector<float> & data, float cutoff, float sample_rate) {
-    const float rc = 1.0f / (2.0f * M_PI * cutoff);
-    const float dt = 1.0f / sample_rate;
-    const float alpha = dt / (rc + dt);
-
-    float y = data[0];
-
-    for (size_t i = 1; i < data.size(); i++) {
-        y = alpha * (y + data[i] - data[i - 1]);
-        data[i] = y;
-    }
-}
-
-bool vad_simple(std::vector<float> & pcmf32, int sample_rate, int last_ms, float vad_thold, float freq_thold, bool verbose) {
-    const int n_samples = pcmf32.size();
-    const int n_samples_last = (sample_rate * last_ms) / 1000;
-
-    if (n_samples_last >= n_samples) {
-        // not enough samples - assume no speech
-        return false;
-    }
-
-    if (freq_thold > 0.0f) {
-        high_pass_filter(pcmf32, freq_thold, sample_rate);
-    }
-
-    float energy_all = 0.0f;
-    float energy_last = 0.0f;
-
-    for (int i = 0; i < n_samples; i++) {
-        energy_all += fabsf(pcmf32[i]);
-
-        if (i >= n_samples - n_samples_last) {
-            energy_last += fabsf(pcmf32[i]);
-        }
-    }
-
-    energy_all /= n_samples;
-    energy_last /= n_samples_last;
-
-    if (verbose) {
-        fprintf(stderr, "%s: energy_all: %f, energy_last: %f, vad_thold: %f, freq_thold: %f\n", __func__, energy_all, energy_last, vad_thold, freq_thold);
-    }
-
-    if (energy_last > vad_thold*energy_all) {
-        return false;
-    }
-
-    return true;
-}
-
int main(int argc, char ** argv) {
    whisper_params params;

@@ -426,10 +123,10 @@ int main(int argc, char ** argv) {
    params.keep_ms   = std::min(params.keep_ms,   params.step_ms);
    params.length_ms = std::max(params.length_ms, params.step_ms);

-   const int n_samples_step = (params.step_ms  *1e-3)*WHISPER_SAMPLE_RATE;
-   const int n_samples_len  = (params.length_ms*1e-3)*WHISPER_SAMPLE_RATE;
-   const int n_samples_keep = (params.keep_ms  *1e-3)*WHISPER_SAMPLE_RATE;
-   const int n_samples_30s  = (30000           *1e-3)*WHISPER_SAMPLE_RATE;
+   const int n_samples_step = (1e-3*params.step_ms  )*WHISPER_SAMPLE_RATE;
+   const int n_samples_len  = (1e-3*params.length_ms)*WHISPER_SAMPLE_RATE;
+   const int n_samples_keep = (1e-3*params.keep_ms  )*WHISPER_SAMPLE_RATE;
+   const int n_samples_30s  = (1e-3*30000.0         )*WHISPER_SAMPLE_RATE;

    const bool use_vad = n_samples_step <= 0; // sliding window mode uses VAD

@@ -517,23 +214,7 @@ int main(int argc, char ** argv) {
    // main audio loop
    while (is_running) {
        // handle Ctrl + C
-       {
-           SDL_Event event;
-           while (SDL_PollEvent(&event)) {
-               switch (event.type) {
-                   case SDL_QUIT:
-                       {
-                           is_running = false;
-                       } break;
-                   default:
-                       break;
-               }
-           }
-
-           if (!is_running) {
-               break;
-           }
-       }
+       is_running = sdl_poll_events();

        if (!is_running) {
            break;
@@ -556,7 +237,7 @@ int main(int argc, char ** argv) {
                break;
            }

-           SDL_Delay(1);
+           std::this_thread::sleep_for(std::chrono::milliseconds(1));
        }

        const int n_samples_new = pcmf32_new.size();
@@ -587,7 +268,7 @@ int main(int argc, char ** argv) {

            audio.get(2000, pcmf32_new);

-           if (vad_simple(pcmf32_new, WHISPER_SAMPLE_RATE, 1000, params.vad_thold, params.freq_thold, false)) {
+           if (::vad_simple(pcmf32_new, WHISPER_SAMPLE_RATE, 1000, params.vad_thold, params.freq_thold, false)) {
                audio.get(params.length_ms, pcmf32);
            } else {
                std::this_thread::sleep_for(std::chrono::milliseconds(100));
@@ -607,7 +288,6 @@ int main(int argc, char ** argv) {
        wparams.print_realtime   = false;
        wparams.print_timestamps = !params.no_timestamps;
        wparams.translate        = params.translate;
-       wparams.no_context       = true;
        wparams.single_segment   = !use_vad;
        wparams.max_tokens       = params.max_tokens;
        wparams.language         = params.language.c_str();

@@ -7,7 +7,7 @@ if (WHISPER_SUPPORT_SDL2)

    # TODO: this is temporary
    # need to export ggml symbols for MSVC, but too lazy ..
-   add_executable(${TARGET} talk.cpp gpt-2.cpp ../../ggml.c ../../whisper.cpp)
+   add_executable(${TARGET} talk.cpp gpt-2.cpp ../common.cpp ../common-sdl.cpp ../../ggml.c ../../whisper.cpp)

    include(DefaultTargetOptions)

@@ -1,16 +1,14 @@
// Talk with AI
//

+#include "common.h"
+#include "common-sdl.h"
#include "whisper.h"
#include "gpt-2.h"

-#include <SDL.h>
-#include <SDL_audio.h>
-
#include <cassert>
#include <cstdio>
#include <fstream>
-#include <mutex>
#include <regex>
#include <string>
#include <thread>
@@ -105,320 +103,6 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & params) {
    fprintf(stderr, "\n");
}

-//
-// SDL Audio capture
-//
-
-class audio_async {
-public:
-    audio_async(int len_ms);
-    ~audio_async();
-
-    bool init(int capture_id, int sample_rate);
-
-    // start capturing audio via the provided SDL callback
-    // keep last len_ms seconds of audio in a circular buffer
-    bool resume();
-    bool pause();
-    bool clear();
-
-    // callback to be called by SDL
-    void callback(uint8_t * stream, int len);
-
-    // get audio data from the circular buffer
-    void get(int ms, std::vector<float> & audio);
-
-private:
-    SDL_AudioDeviceID m_dev_id_in = 0;
-
-    int m_len_ms = 0;
-    int m_sample_rate = 0;
-
-    bool m_running = false;
-    std::mutex m_mutex;
-
-    std::vector<float> m_audio;
-    std::vector<float> m_audio_new;
-    size_t m_audio_pos = 0;
-    size_t m_audio_len = 0;
-};
-
-audio_async::audio_async(int len_ms) {
-    m_len_ms = len_ms;
-}
-
-audio_async::~audio_async() {
-    if (m_dev_id_in) {
-        SDL_CloseAudioDevice(m_dev_id_in);
-    }
-}
-
-bool audio_async::init(int capture_id, int sample_rate) {
-    SDL_LogSetPriority(SDL_LOG_CATEGORY_APPLICATION, SDL_LOG_PRIORITY_INFO);
-
-    if (SDL_Init(SDL_INIT_AUDIO) < 0) {
-        SDL_LogError(SDL_LOG_CATEGORY_APPLICATION, "Couldn't initialize SDL: %s\n", SDL_GetError());
-        return false;
-    }
-
-    SDL_SetHintWithPriority(SDL_HINT_AUDIO_RESAMPLING_MODE, "medium", SDL_HINT_OVERRIDE);
-
-    {
-        int nDevices = SDL_GetNumAudioDevices(SDL_TRUE);
-        fprintf(stderr, "%s: found %d capture devices:\n", __func__, nDevices);
-        for (int i = 0; i < nDevices; i++) {
-            fprintf(stderr, "%s: - Capture device #%d: '%s'\n", __func__, i, SDL_GetAudioDeviceName(i, SDL_TRUE));
-        }
-    }
-
-    SDL_AudioSpec capture_spec_requested;
-    SDL_AudioSpec capture_spec_obtained;
-
-    SDL_zero(capture_spec_requested);
-    SDL_zero(capture_spec_obtained);
-
-    capture_spec_requested.freq = sample_rate;
-    capture_spec_requested.format = AUDIO_F32;
-    capture_spec_requested.channels = 1;
-    capture_spec_requested.samples = 1024;
-    capture_spec_requested.callback = [](void * userdata, uint8_t * stream, int len) {
-        audio_async * audio = (audio_async *) userdata;
-        audio->callback(stream, len);
-    };
-    capture_spec_requested.userdata = this;
-
-    if (capture_id >= 0) {
-        fprintf(stderr, "%s: attempt to open capture device %d : '%s' ...\n", __func__, capture_id, SDL_GetAudioDeviceName(capture_id, SDL_TRUE));
-        m_dev_id_in = SDL_OpenAudioDevice(SDL_GetAudioDeviceName(capture_id, SDL_TRUE), SDL_TRUE, &capture_spec_requested, &capture_spec_obtained, 0);
-    } else {
-        fprintf(stderr, "%s: attempt to open default capture device ...\n", __func__);
-        m_dev_id_in = SDL_OpenAudioDevice(nullptr, SDL_TRUE, &capture_spec_requested, &capture_spec_obtained, 0);
-    }
-
-    if (!m_dev_id_in) {
-        fprintf(stderr, "%s: couldn't open an audio device for capture: %s!\n", __func__, SDL_GetError());
-        m_dev_id_in = 0;
-
-        return false;
-    } else {
-        fprintf(stderr, "%s: obtained spec for input device (SDL Id = %d):\n", __func__, m_dev_id_in);
-        fprintf(stderr, "%s: - sample rate: %d\n", __func__, capture_spec_obtained.freq);
-        fprintf(stderr, "%s: - format: %d (required: %d)\n", __func__, capture_spec_obtained.format,
-                capture_spec_requested.format);
-        fprintf(stderr, "%s: - channels: %d (required: %d)\n", __func__, capture_spec_obtained.channels,
-                capture_spec_requested.channels);
-        fprintf(stderr, "%s: - samples per frame: %d\n", __func__, capture_spec_obtained.samples);
-        fprintf(stderr, "\n");
-    }
-
-    m_sample_rate = capture_spec_obtained.freq;
-
-    m_audio.resize((m_sample_rate*m_len_ms)/1000);
-
-    return true;
-}
-
-bool audio_async::resume() {
-    if (!m_dev_id_in) {
-        fprintf(stderr, "%s: no audio device to resume!\n", __func__);
-        return false;
-    }
-
-    if (m_running) {
-        fprintf(stderr, "%s: already running!\n", __func__);
-        return false;
-    }
-
-    SDL_PauseAudioDevice(m_dev_id_in, 0);
-
-    m_running = true;
-
-    return true;
-}
-
-bool audio_async::pause() {
-    if (!m_dev_id_in) {
-        fprintf(stderr, "%s: no audio device to pause!\n", __func__);
-        return false;
-    }
-
-    if (!m_running) {
-        fprintf(stderr, "%s: already paused!\n", __func__);
-        return false;
-    }
-
-    SDL_PauseAudioDevice(m_dev_id_in, 1);
-
-    m_running = false;
-
-    return true;
-}
-
-bool audio_async::clear() {
-    if (!m_dev_id_in) {
-        fprintf(stderr, "%s: no audio device to clear!\n", __func__);
-        return false;
-    }
-
-    if (!m_running) {
-        fprintf(stderr, "%s: not running!\n", __func__);
-        return false;
-    }
-
-    {
-        std::lock_guard<std::mutex> lock(m_mutex);
-
-        m_audio_pos = 0;
-        m_audio_len = 0;
-    }
-
-    return true;
-}
-
-// callback to be called by SDL
-void audio_async::callback(uint8_t * stream, int len) {
-    if (!m_running) {
-        return;
-    }
-
-    const size_t n_samples = len / sizeof(float);
-
-    m_audio_new.resize(n_samples);
-    memcpy(m_audio_new.data(), stream, n_samples * sizeof(float));
-
-    //fprintf(stderr, "%s: %zu samples, pos %zu, len %zu\n", __func__, n_samples, m_audio_pos, m_audio_len);
-
-    {
-        std::lock_guard<std::mutex> lock(m_mutex);
-
-        if (m_audio_pos + n_samples > m_audio.size()) {
-            const size_t n0 = m_audio.size() - m_audio_pos;
-
-            memcpy(&m_audio[m_audio_pos], stream, n0 * sizeof(float));
-            memcpy(&m_audio[0], &stream[n0], (n_samples - n0) * sizeof(float));
-
-            m_audio_pos = (m_audio_pos + n_samples) % m_audio.size();
-            m_audio_len = m_audio.size();
-        } else {
-            memcpy(&m_audio[m_audio_pos], stream, n_samples * sizeof(float));
-
-            m_audio_pos = (m_audio_pos + n_samples) % m_audio.size();
-            m_audio_len = std::min(m_audio_len + n_samples, m_audio.size());
-        }
-    }
-}
-
-void audio_async::get(int ms, std::vector<float> & result) {
-    if (!m_dev_id_in) {
-        fprintf(stderr, "%s: no audio device to get audio from!\n", __func__);
-        return;
-    }
-
-    if (!m_running) {
-        fprintf(stderr, "%s: not running!\n", __func__);
-        return;
-    }
-
-    result.clear();
-
-    {
-        std::lock_guard<std::mutex> lock(m_mutex);
-
-        if (ms <= 0) {
-            ms = m_len_ms;
-        }
-
-        size_t n_samples = (m_sample_rate * ms) / 1000;
-        if (n_samples > m_audio_len) {
-            n_samples = m_audio_len;
-        }
-
-        result.resize(n_samples);
-
-        int s0 = m_audio_pos - n_samples;
-        if (s0 < 0) {
-            s0 += m_audio.size();
-        }
-
-        if (s0 + n_samples > m_audio.size()) {
-            const size_t n0 = m_audio.size() - s0;
-
-            memcpy(result.data(), &m_audio[s0], n0 * sizeof(float));
-            memcpy(&result[n0], &m_audio[0], (n_samples - n0) * sizeof(float));
-        } else {
-            memcpy(result.data(), &m_audio[s0], n_samples * sizeof(float));
-        }
-    }
-}
-
-///////////////////////////
-
-std::string trim(const std::string & s) {
-    std::regex e("^\\s+|\\s+$");
-    return std::regex_replace(s, e, "");
-}
-
-std::string replace(const std::string & s, const std::string & from, const std::string & to) {
-    std::string result = s;
-    size_t pos = 0;
-    while ((pos = result.find(from, pos)) != std::string::npos) {
-        result.replace(pos, from.length(), to);
-        pos += to.length();
-    }
-    return result;
-}
-
-void high_pass_filter(std::vector<float> & data, float cutoff, float sample_rate) {
-    const float rc = 1.0f / (2.0f * M_PI * cutoff);
-    const float dt = 1.0f / sample_rate;
-    const float alpha = dt / (rc + dt);
-
-    float y = data[0];
-
-    for (size_t i = 1; i < data.size(); i++) {
-        y = alpha * (y + data[i] - data[i - 1]);
-        data[i] = y;
-    }
-}
-
-bool vad_simple(std::vector<float> & pcmf32, int sample_rate, int last_ms, float vad_thold, float freq_thold, bool verbose) {
-    const int n_samples = pcmf32.size();
-    const int n_samples_last = (sample_rate * last_ms) / 1000;
-
-    if (n_samples_last >= n_samples) {
-        // not enough samples - assume no speech
-        return false;
-    }
-
-    if (freq_thold > 0.0f) {
-        high_pass_filter(pcmf32, freq_thold, sample_rate);
-    }
-
-    float energy_all = 0.0f;
-    float energy_last = 0.0f;
-
-    for (int i = 0; i < n_samples; i++) {
-        energy_all += fabsf(pcmf32[i]);
-
-        if (i >= n_samples - n_samples_last) {
-            energy_last += fabsf(pcmf32[i]);
-        }
-    }
-
-    energy_all /= n_samples;
-    energy_last /= n_samples_last;
-
-    if (verbose) {
-        fprintf(stderr, "%s: energy_all: %f, energy_last: %f, vad_thold: %f, freq_thold: %f\n", __func__, energy_all, energy_last, vad_thold, freq_thold);
-    }
-
-    if (energy_last > vad_thold*energy_all) {
-        return false;
-    }
-
-    return true;
-}
-
std::string transcribe(whisper_context * ctx, const whisper_params & params, const std::vector<float> & pcmf32, float & prob, int64_t & t_ms) {
    const auto t_start = std::chrono::high_resolution_clock::now();

@@ -557,22 +241,10 @@ int main(int argc, char ** argv) {
    // main loop
    while (is_running) {
        // handle Ctrl + C
-       {
-           SDL_Event event;
-           while (SDL_PollEvent(&event)) {
-               switch (event.type) {
-                   case SDL_QUIT:
-                       {
-                           is_running = false;
-                       } break;
-                   default:
-                       break;
-               }
-           }
-
+       is_running = sdl_poll_events();

        if (!is_running) {
            break;
        }
-       }

        // delay
@@ -583,7 +255,7 @@ int main(int argc, char ** argv) {
        {
            audio.get(2000, pcmf32_cur);

-           if (vad_simple(pcmf32_cur, WHISPER_SAMPLE_RATE, 1250, params.vad_thold, params.freq_thold, params.print_energy) || force_speak) {
+           if (::vad_simple(pcmf32_cur, WHISPER_SAMPLE_RATE, 1250, params.vad_thold, params.freq_thold, params.print_energy) || force_speak) {
                fprintf(stdout, "%s: Speech detected! Processing ...\n", __func__);

                audio.get(params.voice_ms, pcmf32_cur);
@@ -9,4 +9,4 @@ To use:
5. Select the "release" active build variant, and use Android Studio to run and deploy to your device.
[^1]: I recommend the tiny or base models for running on an Android device.

-<img width="300" alt="image" src="https://user-images.githubusercontent.com/1991296/208154256-82d972dc-221b-48c4-bfcb-36ce68602f93.png">
+<img width="300" alt="image" src="https://user-images.githubusercontent.com/1670775/221613663-a17bf770-27ef-45ab-9a46-a5f99ba65d2a.jpg">

@@ -2,6 +2,7 @@ package com.whispercppdemo.ui.main

import androidx.compose.foundation.layout.*
import androidx.compose.foundation.rememberScrollState
+import androidx.compose.foundation.text.selection.SelectionContainer
import androidx.compose.foundation.verticalScroll
import androidx.compose.material3.*
import androidx.compose.runtime.Composable
@@ -19,6 +20,7 @@ fun MainScreen(viewModel: MainScreenViewModel) {
        canTranscribe = viewModel.canTranscribe,
        isRecording = viewModel.isRecording,
        messageLog = viewModel.dataLog,
+       onBenchmarkTapped = viewModel::benchmark,
        onTranscribeSampleTapped = viewModel::transcribeSample,
        onRecordTapped = viewModel::toggleRecord
    )
@@ -30,6 +32,7 @@ private fun MainScreen(
    canTranscribe: Boolean,
    isRecording: Boolean,
    messageLog: String,
+   onBenchmarkTapped: () -> Unit,
    onTranscribeSampleTapped: () -> Unit,
    onRecordTapped: () -> Unit
) {
@@ -45,8 +48,11 @@ private fun MainScreen(
            .padding(innerPadding)
            .padding(16.dp)
    ) {
-       Row(horizontalArrangement = Arrangement.SpaceBetween) {
-           TranscribeSampleButton(enabled = canTranscribe, onClick = onTranscribeSampleTapped)
+       Column(verticalArrangement = Arrangement.SpaceBetween) {
+           Row(horizontalArrangement = Arrangement.SpaceBetween, modifier = Modifier.fillMaxWidth()) {
+               BenchmarkButton(enabled = canTranscribe, onClick = onBenchmarkTapped)
+               TranscribeSampleButton(enabled = canTranscribe, onClick = onTranscribeSampleTapped)
+           }
            RecordButton(
                enabled = canTranscribe,
                isRecording = isRecording,
@@ -60,7 +66,16 @@ private fun MainScreen(

@Composable
private fun MessageLog(log: String) {
-   Text(modifier = Modifier.verticalScroll(rememberScrollState()), text = log)
+   SelectionContainer() {
+       Text(modifier = Modifier.verticalScroll(rememberScrollState()), text = log)
+   }
+}
+
+@Composable
+private fun BenchmarkButton(enabled: Boolean, onClick: () -> Unit) {
+   Button(onClick = onClick, enabled = enabled) {
+       Text("Benchmark")
+   }
}

@Composable

@@ -41,10 +41,15 @@ class MainScreenViewModel(private val application: Application) : ViewModel() {

    init {
        viewModelScope.launch {
+           printSystemInfo()
            loadData()
        }
    }

+   private suspend fun printSystemInfo() {
+       printMessage(String.format("System Info: %s\n", WhisperContext.getSystemInfo()));
+   }
+
    private suspend fun loadData() {
        printMessage("Loading data...\n")
        try {
@@ -81,10 +86,29 @@ class MainScreenViewModel(private val application: Application) : ViewModel() {
        //whisperContext = WhisperContext.createContextFromFile(firstModel.absolutePath)
    }

+   fun benchmark() = viewModelScope.launch {
+       runBenchmark(6)
+   }
+
    fun transcribeSample() = viewModelScope.launch {
        transcribeAudio(getFirstSample())
    }

+   private suspend fun runBenchmark(nthreads: Int) {
+       if (!canTranscribe) {
+           return
+       }
+
+       canTranscribe = false
+
+       printMessage("Running benchmark. This will take minutes...\n")
+       whisperContext?.benchMemory(nthreads)?.let{ printMessage(it) }
+       printMessage("\n")
+       whisperContext?.benchGgmlMulMat(nthreads)?.let{ printMessage(it) }
+
+       canTranscribe = true
+   }
+
    private suspend fun getFirstSample(): File = withContext(Dispatchers.IO) {
        samplesPath.listFiles()!!.first()
    }
@@ -114,11 +138,14 @@ class MainScreenViewModel(private val application: Application) : ViewModel() {
        canTranscribe = false

        try {
-           printMessage("Reading wave samples...\n")
+           printMessage("Reading wave samples... ")
            val data = readAudioSamples(file)
+           printMessage("${data.size / (16000 / 1000)} ms\n")
            printMessage("Transcribing data...\n")
+           val start = System.currentTimeMillis()
            val text = whisperContext?.transcribeData(data)
-           printMessage("Done: $text\n")
+           val elapsed = System.currentTimeMillis() - start
+           printMessage("Done ($elapsed ms): $text\n")
        } catch (e: Exception) {
            Log.w(LOG_TAG, e)
            printMessage("${e.localizedMessage}\n")
@@ -27,6 +27,14 @@ class WhisperContext private constructor(private var ptr: Long) {
        }
    }

+   suspend fun benchMemory(nthreads: Int): String = withContext(scope.coroutineContext) {
+       return@withContext WhisperLib.benchMemcpy(nthreads)
+   }
+
+   suspend fun benchGgmlMulMat(nthreads: Int): String = withContext(scope.coroutineContext) {
+       return@withContext WhisperLib.benchGgmlMulMat(nthreads)
+   }
+
    suspend fun release() = withContext(scope.coroutineContext) {
        if (ptr != 0L) {
            WhisperLib.freeContext(ptr)
@@ -66,6 +74,10 @@ class WhisperContext private constructor(private var ptr: Long) {
            }
            return WhisperContext(ptr)
        }
+
+       fun getSystemInfo(): String {
+           return WhisperLib.getSystemInfo()
+       }
    }
}

@@ -74,6 +86,7 @@ private class WhisperLib {
        init {
            Log.d(LOG_TAG, "Primary ABI: ${Build.SUPPORTED_ABIS[0]}")
            var loadVfpv4 = false
+           var loadV8fp16 = false
            if (isArmEabiV7a()) {
                // armeabi-v7a needs runtime detection support
                val cpuInfo = cpuInfo()
@@ -84,11 +97,24 @@ private class WhisperLib {
                        loadVfpv4 = true
                    }
                }
+           } else if (isArmEabiV8a()) {
+               // ARMv8.2a needs runtime detection support
+               val cpuInfo = cpuInfo()
+               cpuInfo?.let {
+                   Log.d(LOG_TAG, "CPU info: $cpuInfo")
+                   if (cpuInfo.contains("fphp")) {
+                       Log.d(LOG_TAG, "CPU supports fp16 arithmetic")
+                       loadV8fp16 = true
+                   }
+               }
            }

            if (loadVfpv4) {
                Log.d(LOG_TAG, "Loading libwhisper_vfpv4.so")
                System.loadLibrary("whisper_vfpv4")
+           } else if (loadV8fp16) {
+               Log.d(LOG_TAG, "Loading libwhisper_v8fp16_va.so")
+               System.loadLibrary("whisper_v8fp16_va")
            } else {
                Log.d(LOG_TAG, "Loading libwhisper.so")
                System.loadLibrary("whisper")
@@ -103,6 +129,9 @@ private class WhisperLib {
        external fun fullTranscribe(contextPtr: Long, audioData: FloatArray)
        external fun getTextSegmentCount(contextPtr: Long): Int
        external fun getTextSegment(contextPtr: Long, index: Int): String
+       external fun getSystemInfo(): String
+       external fun benchMemcpy(nthread: Int): String
+       external fun benchGgmlMulMat(nthread: Int): String
    }
}

@@ -110,6 +139,10 @@ private fun isArmEabiV7a(): Boolean {
    return Build.SUPPORTED_ABIS[0].equals("armeabi-v7a")
}

+private fun isArmEabiV8a(): Boolean {
+   return Build.SUPPORTED_ABIS[0].equals("arm64-v8a")
+}
+
private fun cpuInfo(): String? {
    return try {
        File("/proc/cpuinfo").inputStream().bufferedReader().use {
@@ -12,4 +12,15 @@ ifeq ($(TARGET_ARCH_ABI),armeabi-v7a)
# https://android.googlesource.com/platform/ndk/+/master/sources/android/cpufeatures/cpu-features.h
LOCAL_CFLAGS += -mfpu=neon-vfpv4
include $(BUILD_SHARED_LIBRARY)
endif

+ifeq ($(TARGET_ARCH_ABI),arm64-v8a)
+include $(CLEAR_VARS)
+LOCAL_MODULE := libwhisper_v8fp16_va
+include $(LOCAL_PATH)/Whisper.mk
+# Allow building NEON FMA code.
+# https://android.googlesource.com/platform/ndk/+/master/sources/android/cpufeatures/cpu-features.h
+LOCAL_CFLAGS += -march=armv8.2-a+fp16
+include $(BUILD_SHARED_LIBRARY)
+endif
+
|
@ -6,6 +6,7 @@
|
|||||||
#include <sys/sysinfo.h>
|
#include <sys/sysinfo.h>
|
||||||
#include <string.h>
|
#include <string.h>
|
||||||
#include "whisper.h"
|
#include "whisper.h"
|
||||||
|
#include "ggml.h"
|
||||||
|
|
||||||
#define UNUSED(x) (void)(x)
|
#define UNUSED(x) (void)(x)
|
||||||
#define TAG "JNI"
|
#define TAG "JNI"
|
||||||
@ -213,4 +214,30 @@ Java_com_whispercppdemo_whisper_WhisperLib_00024Companion_getTextSegment(
|
|||||||
const char *text = whisper_full_get_segment_text(context, index);
|
const char *text = whisper_full_get_segment_text(context, index);
|
||||||
jstring string = (*env)->NewStringUTF(env, text);
|
jstring string = (*env)->NewStringUTF(env, text);
|
||||||
return string;
|
return string;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
JNIEXPORT jstring JNICALL
|
||||||
|
Java_com_whispercppdemo_whisper_WhisperLib_00024Companion_getSystemInfo(
|
||||||
|
JNIEnv *env, jobject thiz
|
||||||
|
) {
|
||||||
|
UNUSED(thiz);
|
||||||
|
const char *sysinfo = whisper_print_system_info();
|
||||||
|
jstring string = (*env)->NewStringUTF(env, sysinfo);
|
||||||
|
return string;
|
||||||
|
}
|
||||||
|
|
||||||
|
JNIEXPORT jstring JNICALL
|
||||||
|
Java_com_whispercppdemo_whisper_WhisperLib_00024Companion_benchMemcpy(JNIEnv *env, jobject thiz,
|
||||||
|
jint n_threads) {
|
||||||
|
UNUSED(thiz);
|
||||||
|
const char *bench_ggml_memcpy = whisper_bench_memcpy_str(n_threads);
|
||||||
|
jstring string = (*env)->NewStringUTF(env, bench_ggml_memcpy);
|
||||||
|
}
|
||||||
|
|
||||||
|
JNIEXPORT jstring JNICALL
|
||||||
|
Java_com_whispercppdemo_whisper_WhisperLib_00024Companion_benchGgmlMulMat(JNIEnv *env, jobject thiz,
|
||||||
|
jint n_threads) {
|
||||||
|
UNUSED(thiz);
|
||||||
|
const char *bench_ggml_mul_mat = whisper_bench_ggml_mul_mat_str(n_threads);
|
||||||
|
jstring string = (*env)->NewStringUTF(env, bench_ggml_mul_mat);
|
||||||
|
}
|
||||||
|
@@ -62,8 +62,8 @@
            <!-- radio button to select between file upload or microphone -->
            <div id="input">
                Input:
-               <input type="radio" id="file" name="input" value="file" checked="checked" onchange="changeInput('file')" /> File
-               <input type="radio" id="mic" name="input" value="mic" onchange="changeInput('mic')" /> Microphone
+               <input type="radio" id="file" name="input" value="file" checked="checked" onchange="changeInput('file')" /> <label for="file">File</label>
+               <input type="radio" id="mic" name="input" value="mic" onchange="changeInput('mic')" /> <label for="mic">Microphone</label>
            </div>

            <br>
@@ -1,20 +1,10 @@
#!/usr/bin/env bash
+# shellcheck disable=2086
-# Small shell script to more easily automatically download and transcribe live stream VODs.
-# This uses YT-DLP, ffmpeg and the CPP version of Whisper: https://github.com/ggerganov/whisper.cpp
-# Use `./examples/yt-wsp.sh help` to print help info.
-#
-# Sample usage:
-#
-#   git clone https://github.com/ggerganov/whisper.cpp
-#   cd whisper.cpp
-#   make
-#   ./examples/yt-wsp.sh https://www.youtube.com/watch?v=1234567890
-#

# MIT License

# Copyright (c) 2022 Daniils Petrovs
+# Copyright (c) 2023 Jennifer Capasso

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -34,114 +24,178 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

+# Small shell script to more easily automatically download and transcribe live stream VODs.
+# This uses YT-DLP, ffmpeg and the CPP version of Whisper: https://github.com/ggerganov/whisper.cpp
+# Use `./examples/yt-wsp.sh help` to print help info.
+#
+# Sample usage:
+#
+#   git clone https://github.com/ggerganov/whisper.cpp
+#   cd whisper.cpp
+#   make
+#   ./examples/yt-wsp.sh https://www.youtube.com/watch?v=1234567890
+#

set -Eeuo pipefail

-# You can find how to download models in the OG repo: https://github.com/ggerganov/whisper.cpp/#usage
-MODEL_PATH="${MODEL_PATH:-models/ggml-base.en.bin}" # Set to a multilingual model if you want to translate from foreign lang to en
-WHISPER_EXECUTABLE="${WHISPER_EXECUTABLE:-whisper}" # Where to find the whisper.cpp executable
-WHISPER_LANG="${WHISPER_LANG:-en}" # Set to desired lang to translate from
+# get script file location
+SCRIPT_PATH="$(realpath -e ${BASH_SOURCE[0]})";
+SCRIPT_DIR="${SCRIPT_PATH%/*}"
+
+################################################################################
+# Documentation on downloading models can be found in the whisper.cpp repo:
+# https://github.com/ggerganov/whisper.cpp/#usage
+#
+# note: unless a multilingual model is specified, WHISPER_LANG will be ignored
+# and the video will be transcribed as if the audio were in the English language
+################################################################################
+MODEL_PATH="${MODEL_PATH:-${SCRIPT_DIR}/../models/ggml-base.en.bin}"
+
+################################################################################
+# Where to find the whisper.cpp executable. default to the examples directory
+# which holds this script in source control
+################################################################################
+WHISPER_EXECUTABLE="${WHISPER_EXECUTABLE:-${SCRIPT_DIR}/../main}";
+
+# Set to desired language to be translated into english
+WHISPER_LANG="${WHISPER_LANG:-en}";
+
+# Default to 4 threads (this was most performant on my 2020 M1 MBP)
+WHISPER_THREAD_COUNT="${WHISPER_THREAD_COUNT:-4}";

msg() {
    echo >&2 -e "${1-}"
}

cleanup() {
-   msg "Cleaning up..."
-   rm -rf "${temp_dir}" "vod-resampled.wav" "vod-resampled.wav.srt"
+   local -r clean_me="${1}";
+
+   if [ -d "${clean_me}" ]; then
+       msg "Cleaning up...";
+       rm -rf "${clean_me}";
+   else
+       msg "'${clean_me}' does not appear to be a directory!";
+       exit 1;
+   fi;
}

print_help() {
+   echo "################################################################################"
    echo "Usage: ./examples/yt-wsp.sh <video_url>"
-   echo "See configurable env variables in the script"
-   echo "This will produce an MP4 muxed file called res.mp4 in the working directory"
-   echo "Requirements: ffmpeg yt-dlp whisper"
-   echo "Whisper needs to be built into the main binary with make, then you can rename it to something like 'whisper' and add it to your PATH for convenience."
-   echo "E.g. in the root of Whisper.cpp, run: 'make && cp ./main /usr/local/bin/whisper'"
+   echo "# See configurable env variables in the script; there are many!"
+   echo "# This script will produce an MP4 muxed file in the working directory; it will"
+   echo "# be named for the title and id of the video."
+   echo "# passing in https://youtu.be/VYJtb2YXae8 produces a file named";
+   echo "# 'Why_we_all_need_subtitles_now-VYJtb2YXae8-res.mp4'"
+   echo "# Requirements: ffmpeg yt-dlp whisper.cpp"
+   echo "################################################################################"
}

check_requirements() {
    if ! command -v ffmpeg &>/dev/null; then
-       echo "ffmpeg is required (https://ffmpeg.org)."
+       echo "ffmpeg is required: https://ffmpeg.org";
        exit 1
-   fi
+   fi;

    if ! command -v yt-dlp &>/dev/null; then
-       echo "yt-dlp is required (https://github.com/yt-dlp/yt-dlp)."
-       exit 1
-   fi
+       echo "yt-dlp is required: https://github.com/yt-dlp/yt-dlp";
+       exit 1;
+   fi;

+   if ! command -v "${WHISPER_EXECUTABLE}" &>/dev/null; then
+       echo "The C++ implementation of Whisper is required: https://github.com/ggerganov/whisper.cpp"
+       echo "Sample usage:";
+       echo "";
+       echo "  git clone https://github.com/ggerganov/whisper.cpp";
+       echo "  cd whisper.cpp";
+       echo "  make";
+       echo "  ./examples/yt-wsp.sh https://www.youtube.com/watch?v=1234567890";
+       echo "";
+       exit 1;
+   fi;
-   if ! command -v "$WHISPER_EXECUTABLE" &>/dev/null; then
-       WHISPER_EXECUTABLE="./main"
-       if ! command -v "$WHISPER_EXECUTABLE" &>/dev/null; then
-           echo "Whisper is required (https://github.com/ggerganov/whisper.cpp):"
-           echo "Sample usage:"
-           echo ""
-           echo "  git clone https://github.com/ggerganov/whisper.cpp"
-           echo "  cd whisper.cpp"
-           echo "  make"
-           echo "  ./examples/yt-wsp.sh https://www.youtube.com/watch?v=1234567890"
-           echo ""
-           exit 1
-       fi
-   fi
}

-if [[ $# -lt 1 ]]; then
-   print_help
-   exit 1
+if [[ "${#}" -lt 1 ]]; then
+   print_help;
+   exit 1;
fi

-if [[ "$1" == "help" ]]; then
-   print_help
-   exit 0
+if [[ "${1##-*}" == "help" ]]; then
+   print_help;
+   exit 0;
fi

-temp_dir="tmp"
-source_url="$1"
+check_requirements;

-check_requirements
+################################################################################
+# create a temporary directory to work in
+# set the temp_dir and temp_filename variables
+################################################################################
+temp_dir="$(mktemp -d ${SCRIPT_DIR}/tmp.XXXXXX)";
+temp_filename="${temp_dir}/yt-dlp-filename";

-msg "Downloading VOD..."
+################################################################################
+# for now we only take one argument
+# TODO: a for loop
+################################################################################
+source_url="${1}"
+title_name="";

-# Optionally add --cookies-from-browser BROWSER[+KEYRING][:PROFILE][::CONTAINER] for members only VODs
+msg "Downloading VOD...";
+
+################################################################################
+# Download the video, put the dynamic output filename into a variable.
+# Optionally add --cookies-from-browser BROWSER[+KEYRING][:PROFILE][::CONTAINER]
+# for videos only available to logged-in users.
+################################################################################
yt-dlp \
    -f "bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best" \
+   -o "${temp_dir}/%(title)s-%(id)s.vod.mp4" \
+   --print-to-file "%(filename)s" "${temp_filename}" \
+   --no-simulate \
+   --no-write-auto-subs \
+   --restrict-filenames \
    --embed-thumbnail \
    --embed-chapters \
    --xattrs \
-   "${source_url}" -o "${temp_dir}/vod.mp4"
+   "${source_url}";

-msg "Extracting audio and resampling..."
+title_name="$(xargs basename -s .vod.mp4 < ${temp_filename})";

-ffmpeg -i "${temp_dir}/vod.mp4" \
+msg "Extracting audio and resampling...";
+
+ffmpeg -i "${temp_dir}/${title_name}.vod.mp4" \
    -hide_banner \
+   -vn \
    -loglevel error \
    -ar 16000 \
    -ac 1 \
-   -c:a \
-   pcm_s16le -y "vod-resampled.wav"
+   -c:a pcm_s16le \
+   -y \
+   "${temp_dir}/${title_name}.vod-resampled.wav";

-msg "Transcribing to subtitle file..."
-msg "Whisper specified at: ${WHISPER_EXECUTABLE}"
+msg "Transcribing to subtitle file...";
+msg "Whisper specified at: '${WHISPER_EXECUTABLE}'";

-$WHISPER_EXECUTABLE \
+"${WHISPER_EXECUTABLE}" \
    -m "${MODEL_PATH}" \
    -l "${WHISPER_LANG}" \
-   -f "vod-resampled.wav" \
-   -t 8 \
+   -f "${temp_dir}/${title_name}.vod-resampled.wav" \
+   -t "${WHISPER_THREAD_COUNT}" \
    -osrt \
-   --translate
+   --translate;

-msg "Embedding subtitle track..."
+msg "Embedding subtitle track...";

-ffmpeg -i "${temp_dir}/vod.mp4" \
+ffmpeg -i "${temp_dir}/${title_name}.vod.mp4" \
    -hide_banner \
    -loglevel error \
-   -i "vod-resampled.wav.srt" \
+   -i "${temp_dir}/${title_name}.vod-resampled.wav.srt" \
    -c copy \
    -c:s mov_text \
-   -y res.mp4
+   -y "${title_name}-res.mp4";

-cleanup
+#cleanup "${temp_dir}";

-msg "Done! Your finished file is ready: res.mp4"
+msg "Done! Your finished file is ready: ${title_name}-res.mp4";

extra/bench-wts.sh (new executable file, 70 lines)
@@ -0,0 +1,70 @@
# Benchmark word-level timestamps for different models
#
# This script takes two arguments
# - an audio file
# - [optional] path to a font file

# I'm using "/usr/share/fonts/truetype/freefont/FreeMono.ttf" on Ubuntu

if [ -z "$1" ]; then
    echo "Usage: $0 <audio file> [font file]"
    exit 1
fi

#TODO: Make this a command line parameter
#models="base small large"
#models="tiny.en tiny base.en base small.en small medium.en medium large-v1 large"
models="tiny.en base.en small.en medium.en large"

DURATION=$(ffprobe -i $1 -show_entries format=duration -v quiet -of csv="p=0")
DURATION=$(printf "%.2f" $DURATION)
echo "Input file duration: ${DURATION}s"

for model in $models; do
    echo "Running $model"
    COMMAND="./main -m models/ggml-$model.bin -owts -f $1 -of $1.$model"

    if [ ! -z "$2" ]; then
        COMMAND="$COMMAND -fp $2"
    fi
    #TODO: Surface errors better
    # TIMEFMT is for zsh, TIMEFORMAT is for bash
    EXECTIME=$({ TIMEFMT="%E";TIMEFORMAT=%E; time $COMMAND >/dev/null 2>&1; } 2>&1)

    # Slightly different formats between zsh and bash
    if [ "${EXECTIME: -1}" == "s" ]; then
        EXECTIME=${EXECTIME::-1}
    fi

    RATIO=$(echo "$DURATION / $EXECTIME" | bc -l)
    RATIO=$(printf "%.2f" $RATIO)

    echo "Execution time: ${EXECTIME}s (${RATIO}x realtime)"

    # If the file already exists, delete it
    if [ -f $1.mp4 ]; then
        rm $1.mp4
    fi

    bash $1.$model.wts >/dev/null 2>&1
    mv $1.mp4 $1.$model.mp4

    ffmpeg -y -f lavfi -i color=c=black:s=1200x50:d=$DURATION -vf "drawtext=fontfile=$2:fontsize=36:x=10:y=(h-text_h)/2:text='ggml-$model - ${EXECTIME}s (${RATIO}x realtime)':fontcolor=lightgrey" $1.$model.info.mp4 >/dev/null 2>&1
done

COMMAND="ffmpeg -y"
for model in $models; do
    COMMAND="$COMMAND -i $1.$model.info.mp4 -i $1.$model.mp4"
done
COMMAND="$COMMAND -filter_complex \""
COUNT=0
for model in $models; do
    COMMAND="$COMMAND[${COUNT}:v][$(($COUNT+1)):v]"
    COUNT=$((COUNT+2))
done
COMMAND="$COMMAND vstack=inputs=${COUNT}[v]\" -map \"[v]\" -map 1:a $1.all.mp4 >/dev/null 2>&1"

echo $COMMAND

# Run the command
eval $COMMAND
ggml.c (138 changed lines)

@@ -79,7 +79,7 @@ typedef void* thread_ret_t;
 #define static_assert(cond, msg) _Static_assert(cond, msg)
 #endif

-/*#define GGML_PERF*/
+#define GGML_PERF
 #define GGML_DEBUG 0
 #define GGML_GELU_FP16

@@ -339,8 +339,12 @@ int64_t ggml_cycles_per_ms(void) {
 #if defined(__cpp_lib_hardware_interference_size)
 #define CACHE_LINE_SIZE hardware_destructive_interference_size
 #else
+#if defined(__POWER9_VECTOR__)
+#define CACHE_LINE_SIZE 128
+#else
 #define CACHE_LINE_SIZE 64
 #endif
+#endif

 static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);

@@ -609,9 +613,12 @@ static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);
 #define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ? \
   vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) : \
   vec_extract_fp32_from_shortl(vec_xl(0, p))
-#define GGML_F16_VEC_STORE(p, r, i) \
-  if (i & 0x1) \
-    vec_xst(vec_pack_to_short_fp32(r[i], r[i - 1]), 0, p - GGML_F16_EPR)
+#define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i]
+#define GGML_F16_VEC_STORE(p, r, i) \
+  if (i & 0x1) \
+    vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)], \
+                                   r[i - GGML_ENDIAN_BYTE(0)]), \
+            0, p - GGML_F16_EPR)

 #elif defined(__wasm_simd128__)

@@ -1251,7 +1258,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
 //

 struct ggml_object {
-    size_t offset;
+    size_t offs;
     size_t size;

     struct ggml_object * next;

@@ -1277,6 +1284,9 @@ struct ggml_context {
     struct ggml_object * objects_begin;
     struct ggml_object * objects_end;

+    struct ggml_scratch scratch;
+    struct ggml_scratch scratch_save;
 };

 struct ggml_context_container {

@@ -1339,7 +1349,7 @@ inline static void ggml_critical_section_end(void) {
 void ggml_print_object(const struct ggml_object * obj) {
     GGML_PRINT(" - ggml_object: offset = %zu, size = %zu, next = %p\n",
-            obj->offset, obj->size, (const void *) obj->next);
+            obj->offs, obj->size, (const void *) obj->next);
 }

 void ggml_print_objects(const struct ggml_context * ctx) {

@@ -1535,12 +1545,14 @@ struct ggml_context * ggml_init(struct ggml_init_params params) {
     }

     *ctx = (struct ggml_context) {
-        .mem_size         = params.mem_size,
-        .mem_buffer       = params.mem_buffer ? params.mem_buffer : malloc(params.mem_size),
-        .mem_buffer_owned = params.mem_buffer ? false : true,
-        .n_objects        = 0,
-        .objects_begin    = NULL,
-        .objects_end      = NULL,
+        /*.mem_size         =*/ params.mem_size,
+        /*.mem_buffer       =*/ params.mem_buffer ? params.mem_buffer : malloc(params.mem_size),
+        /*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
+        /*.n_objects        =*/ 0,
+        /*.objects_begin    =*/ NULL,
+        /*.objects_end      =*/ NULL,
+        /*.scratch          =*/ { 0, 0, NULL, },
+        /*.scratch_save     =*/ { 0, 0, NULL, },
     };

     ggml_assert_aligned(ctx->mem_buffer);

@@ -1563,7 +1575,7 @@ void ggml_free(struct ggml_context * ctx) {
             g_state.contexts[i].used = false;

             GGML_PRINT_DEBUG("%s: context %d with %d objects has been freed. memory used = %zu\n",
-                    __func__, i, ctx->n_objects, ctx->objects_end->offset + ctx->objects_end->size);
+                    __func__, i, ctx->n_objects, ctx->objects_end->offs + ctx->objects_end->size);

             if (ctx->mem_buffer_owned) {
                 free(ctx->mem_buffer);

@@ -1582,7 +1594,15 @@ void ggml_free(struct ggml_context * ctx) {
 }

 size_t ggml_used_mem(const struct ggml_context * ctx) {
-    return ctx->objects_end->offset + ctx->objects_end->size;
+    return ctx->objects_end->offs + ctx->objects_end->size;
+}
+
+size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) {
+    const size_t result = ctx->scratch.data ? ctx->scratch.offs : 0;
+
+    ctx->scratch = scratch;
+
+    return result;
 }

 ////////////////////////////////////////////////////////////////////////////////

@@ -1596,9 +1616,9 @@ struct ggml_tensor * ggml_new_tensor_impl(
     // always insert objects at the end of the context's memory pool
     struct ggml_object * obj_cur = ctx->objects_end;

-    const size_t cur_offset = obj_cur == NULL ? 0 : obj_cur->offset;
+    const size_t cur_offs = obj_cur == NULL ? 0 : obj_cur->offs;
     const size_t cur_size = obj_cur == NULL ? 0 : obj_cur->size;
-    const size_t cur_end = cur_offset + cur_size;
+    const size_t cur_end = cur_offs + cur_size;

     size_t size_needed = 0;

@@ -1609,25 +1629,52 @@ struct ggml_tensor * ggml_new_tensor_impl(
         }
         // align to GGML_MEM_ALIGN
         size_needed = ((size_needed + GGML_MEM_ALIGN - 1)/GGML_MEM_ALIGN)*GGML_MEM_ALIGN;
-
-    }
-    size_needed += sizeof(struct ggml_tensor);
-
-    if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) {
-        GGML_PRINT("%s: not enough space in the context's memory pool\n", __func__);
-        assert(false);
-        return NULL;
     }

     char * const mem_buffer = ctx->mem_buffer;

     struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end);

-    *obj_new = (struct ggml_object) {
-        .offset = cur_end + GGML_OBJECT_SIZE,
-        .size   = size_needed,
-        .next   = NULL,
-    };
+    if (ctx->scratch.data == NULL || data != NULL) {
+        size_needed += sizeof(struct ggml_tensor);
+
+        if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) {
+            GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
+                    __func__, cur_end + size_needed + GGML_OBJECT_SIZE, ctx->mem_size);
+            assert(false);
+            return NULL;
+        }
+
+        *obj_new = (struct ggml_object) {
+            .offs = cur_end + GGML_OBJECT_SIZE,
+            .size = size_needed,
+            .next = NULL,
+        };
+    } else {
+        if (ctx->scratch.offs + size_needed > ctx->scratch.size) {
+            GGML_PRINT("%s: not enough space in the scratch memory\n", __func__);
+            assert(false);
+            return NULL;
+        }
+
+        if (cur_end + sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE > ctx->mem_size) {
+            GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n",
+                    __func__, cur_end + sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE, ctx->mem_size);
+            assert(false);
+            return NULL;
+        }
+
+        data = (char * const) ctx->scratch.data + ctx->scratch.offs;
+
+        *obj_new = (struct ggml_object) {
+            .offs = cur_end + GGML_OBJECT_SIZE,
+            .size = sizeof(struct ggml_tensor),
+            .next = NULL,
+        };
+
+        //printf("scratch offs = %zu, size_needed = %zu\n", ctx->scratch.offs, size_needed);
+
+        ctx->scratch.offs += size_needed;
+    }

     if (obj_cur != NULL) {
         obj_cur->next = obj_new;

@@ -1638,9 +1685,9 @@ struct ggml_tensor * ggml_new_tensor_impl(
     ctx->objects_end = obj_new;

-    //GGML_PRINT_DEBUG("%s: inserted new object at %zu\n", __func__, cur_end);
+    //printf("%s: inserted new object at %zu, size = %zu\n", __func__, cur_end, obj_new->size);

-    struct ggml_tensor * const result = (struct ggml_tensor *)(mem_buffer + obj_new->offset);
+    struct ggml_tensor * const result = (struct ggml_tensor *)(mem_buffer + obj_new->offs);

     ggml_assert_aligned(result);

@@ -1683,7 +1730,7 @@ struct ggml_tensor * ggml_new_tensor(
         struct ggml_context * ctx,
         enum ggml_type type,
         int n_dims,
-        const int* ne) {
+        const int * ne) {
     return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL);
 }

@@ -1725,16 +1772,26 @@ struct ggml_tensor * ggml_new_tensor_4d(
 }

 struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) {
+    ctx->scratch_save = ctx->scratch;
+    ctx->scratch.data = NULL;
+
     struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);

+    ctx->scratch = ctx->scratch_save;
+
     ggml_set_i32(result, value);

     return result;
 }

 struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) {
+    ctx->scratch_save = ctx->scratch;
+    ctx->scratch.data = NULL;
+
     struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);

+    ctx->scratch = ctx->scratch_save;
+
     ggml_set_f32(result, value);

     return result;

@@ -2343,7 +2400,7 @@ struct ggml_tensor * ggml_repeat(
     result->op   = GGML_OP_REPEAT;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
     result->src0 = a;
-    result->src1 = NULL;
+    result->src1 = b;

     return result;
 }

@@ -2959,9 +3016,7 @@ struct ggml_tensor * ggml_diag_mask_inf(
     // TODO: when implement backward, fix this:
     //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
     struct ggml_tensor * result = ggml_view_tensor(ctx, a);
-    struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);
-    ((int32_t *) b->data)[0] = n_past;
+    struct ggml_tensor * b = ggml_new_i32(ctx, n_past);

     result->op   = GGML_OP_DIAG_MASK_INF;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;

@@ -4293,7 +4348,9 @@ static bool ggml_compute_forward_mul_mat_use_blas(
     const int ne1 = dst->ne[1];

     // TODO: find the optimal values for these
-    if (ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && ne0 >= 32 && ne1 >= 32 && ne10 >= 32) {
+    if (ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && (
+        (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)
+        )) {
         //printf("BLAS: %d %d %d\n", ne0, ne1, ne10);
         return true;
     }

@@ -7282,6 +7339,9 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph)
         node->n_tasks = 1; // TODO: this actually is doing nothing
                            //       the threads are still spinning
         cur = sizeof(float)*(node->src0->ne[0]*node->src0->ne[1]);
+        //printf("src0: ne0 = %d, ne1 = %d, ne = %d\n", node->src0->ne[0], node->src0->ne[1], node->src0->ne[0]*node->src0->ne[1]);
+        //printf("src1: ne0 = %d, ne1 = %d, ne = %d\n", node->src1->ne[0], node->src1->ne[1], node->src1->ne[0]*node->src1->ne[1]);
+        //printf("cur = %zu\n", cur);
     } else {
         cur = sizeof(ggml_fp16_t)*ggml_nelements(node->src1);
     }
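As an aside on the GGML_ENDIAN_BYTE macro added in the POWER9 hunk above: it picks the byte index of the least-significant byte of a uint16_t holding 1, so the operands passed to vec_pack_to_short_fp32 come out in the right order on both little- and big-endian targets. A standalone illustration (the main/printf wrapper is not part of the diff, only the macro is):

// Illustration only: shows what the GGML_ENDIAN_BYTE probe evaluates to.
#include <stdint.h>
#include <stdio.h>

#define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i]

int main(void) {
    // little-endian: GGML_ENDIAN_BYTE(0) == 1, GGML_ENDIAN_BYTE(1) == 0,
    //                so the store macro uses r[i], r[i - 1] as before
    // big-endian:    the two indices are swapped
    printf("GGML_ENDIAN_BYTE(0) = %d, GGML_ENDIAN_BYTE(1) = %d\n",
           GGML_ENDIAN_BYTE(0), GGML_ENDIAN_BYTE(1));
    return 0;
}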
ggml.h (9 changed lines)

@@ -301,6 +301,13 @@ struct ggml_cgraph {
     int64_t perf_time_us;
 };

+// scratch buffer
+struct ggml_scratch {
+    size_t offs;
+    size_t size;
+    void * data;
+};
+
 struct ggml_init_params {
     // memory pool
     size_t mem_size; // bytes

@@ -327,6 +334,8 @@ void ggml_free(struct ggml_context * ctx);

 size_t ggml_used_mem(const struct ggml_context * ctx);

+size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch);
+
 struct ggml_tensor * ggml_new_tensor(
     struct ggml_context * ctx,
     enum ggml_type type,
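The header only declares the scratch API; the allocation behaviour comes from the ggml.c hunk above (tensor data is placed in the scratch buffer whenever one is set and no explicit data pointer is given). A minimal usage sketch, assuming a caller-owned static buffer and an already-initialized context; the function and buffer names are hypothetical:

// Sketch: route intermediate tensor data into caller-provided scratch memory,
// then clear the scratch to return to normal allocation from the context pool.
#include "ggml.h"

static void use_scratch_example(struct ggml_context * ctx) {
    static char scratch_buf[16*1024*1024]; // hypothetical caller-owned buffer

    // subsequent tensors keep only their headers in the context pool;
    // their data is carved out of scratch_buf
    ggml_set_scratch(ctx, (struct ggml_scratch) { 0, sizeof(scratch_buf), scratch_buf, });

    struct ggml_tensor * tmp = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);
    (void) tmp;

    // restore normal allocation
    ggml_set_scratch(ctx, (struct ggml_scratch) { 0, 0, NULL, });
}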
whisper.cpp (1808 changed lines)
(file diff suppressed because it is too large)
whisper.h (171 changed lines)

@@ -66,6 +66,7 @@ extern "C" {
     //

     struct whisper_context;
+    struct whisper_state;

     typedef int whisper_token;

@@ -101,11 +102,20 @@ extern "C" {
     WHISPER_API struct whisper_context * whisper_init_from_buffer(void * buffer, size_t buffer_size);
     WHISPER_API struct whisper_context * whisper_init(struct whisper_model_loader * loader);

-    // Frees all memory allocated by the model.
-    WHISPER_API void whisper_free(struct whisper_context * ctx);
+    // These are the same as the above, but the internal state of the context is not allocated automatically
+    // It is the responsibility of the caller to allocate the state using whisper_init_state() (#523)
+    WHISPER_API struct whisper_context * whisper_init_from_file_no_state(const char * path_model);
+    WHISPER_API struct whisper_context * whisper_init_from_buffer_no_state(void * buffer, size_t buffer_size);
+    WHISPER_API struct whisper_context * whisper_init_no_state(struct whisper_model_loader * loader);
+
+    WHISPER_API struct whisper_state * whisper_init_state(struct whisper_context * ctx);
+
+    // Frees all allocated memory
+    WHISPER_API void whisper_free      (struct whisper_context * ctx);
+    WHISPER_API void whisper_free_state(struct whisper_state * state);

     // Convert RAW PCM audio to log mel spectrogram.
-    // The resulting spectrogram is stored inside the provided whisper context.
+    // The resulting spectrogram is stored inside the default state of the provided whisper context.
     // Returns 0 on success
     WHISPER_API int whisper_pcm_to_mel(
             struct whisper_context * ctx,

@@ -113,7 +123,30 @@ extern "C" {
             int n_samples,
             int n_threads);

-    // This can be used to set a custom log mel spectrogram inside the provided whisper context.
+    WHISPER_API int whisper_pcm_to_mel_with_state(
+            struct whisper_context * ctx,
+            struct whisper_state * state,
+            const float * samples,
+            int n_samples,
+            int n_threads);
+
+    // Convert RAW PCM audio to log mel spectrogram but applies a Phase Vocoder to speed up the audio x2.
+    // The resulting spectrogram is stored inside the default state of the provided whisper context.
+    // Returns 0 on success
+    WHISPER_API int whisper_pcm_to_mel_phase_vocoder(
+            struct whisper_context * ctx,
+            const float * samples,
+            int n_samples,
+            int n_threads);
+
+    WHISPER_API int whisper_pcm_to_mel_phase_vocoder_with_state(
+            struct whisper_context * ctx,
+            struct whisper_state * state,
+            const float * samples,
+            int n_samples,
+            int n_threads);
+
+    // This can be used to set a custom log mel spectrogram inside the default state of the provided whisper context.
     // Use this instead of whisper_pcm_to_mel() if you want to provide your own log mel spectrogram.
     // n_mel must be 80
     // Returns 0 on success

@@ -123,7 +156,14 @@ extern "C" {
             int n_len,
             int n_mel);

-    // Run the Whisper encoder on the log mel spectrogram stored inside the provided whisper context.
+    WHISPER_API int whisper_set_mel_with_state(
+            struct whisper_context * ctx,
+            struct whisper_state * state,
+            const float * data,
+            int n_len,
+            int n_mel);
+
+    // Run the Whisper encoder on the log mel spectrogram stored inside the default state in the provided whisper context.
     // Make sure to call whisper_pcm_to_mel() or whisper_set_mel() first.
     // offset can be used to specify the offset of the first frame in the spectrogram.
     // Returns 0 on success

@@ -132,6 +172,12 @@ extern "C" {
             int offset,
             int n_threads);

+    WHISPER_API int whisper_encode_with_state(
+            struct whisper_context * ctx,
+            struct whisper_state * state,
+            int offset,
+            int n_threads);
+
     // Run the Whisper decoder to obtain the logits and probabilities for the next token.
     // Make sure to call whisper_encode() first.
     // tokens + n_tokens is the provided context for the decoder.

@@ -145,6 +191,14 @@ extern "C" {
             int n_past,
             int n_threads);

+    WHISPER_API int whisper_decode_with_state(
+            struct whisper_context * ctx,
+            struct whisper_state * state,
+            const whisper_token * tokens,
+            int n_tokens,
+            int n_past,
+            int n_threads);
+
     // Convert the provided text into tokens.
     // The tokens pointer must be large enough to hold the resulting tokens.
     // Returns the number of tokens on success, no more than n_max_tokens

@@ -180,17 +234,26 @@ extern "C" {
             int n_threads,
             float * lang_probs);

-    WHISPER_API int whisper_n_len          (struct whisper_context * ctx); // mel length
-    WHISPER_API int whisper_n_vocab        (struct whisper_context * ctx);
-    WHISPER_API int whisper_n_text_ctx     (struct whisper_context * ctx);
-    WHISPER_API int whisper_n_audio_ctx    (struct whisper_context * ctx);
-    WHISPER_API int whisper_is_multilingual(struct whisper_context * ctx);
+    WHISPER_API int whisper_lang_auto_detect_with_state(
+            struct whisper_context * ctx,
+            struct whisper_state * state,
+            int offset_ms,
+            int n_threads,
+            float * lang_probs);
+
+    WHISPER_API int whisper_n_len           (struct whisper_context * ctx); // mel length
+    WHISPER_API int whisper_n_len_from_state(struct whisper_state * state); // mel length
+    WHISPER_API int whisper_n_vocab         (struct whisper_context * ctx);
+    WHISPER_API int whisper_n_text_ctx      (struct whisper_context * ctx);
+    WHISPER_API int whisper_n_audio_ctx     (struct whisper_context * ctx);
+    WHISPER_API int whisper_is_multilingual (struct whisper_context * ctx);

     // Token logits obtained from the last call to whisper_decode()
     // The logits for the last token are stored in the last row
     // Rows: n_tokens
     // Cols: n_vocab
-    WHISPER_API float * whisper_get_logits(struct whisper_context * ctx);
+    WHISPER_API float * whisper_get_logits           (struct whisper_context * ctx);
+    WHISPER_API float * whisper_get_logits_from_state(struct whisper_state * state);

     // Token Id -> String. Uses the vocabulary in the provided context
     WHISPER_API const char * whisper_token_to_str(struct whisper_context * ctx, whisper_token token);

@@ -208,7 +271,7 @@ extern "C" {
     WHISPER_API whisper_token whisper_token_translate (void);
     WHISPER_API whisper_token whisper_token_transcribe(void);

-    // Performance information
+    // Performance information from the default state.
     WHISPER_API void whisper_print_timings(struct whisper_context * ctx);
     WHISPER_API void whisper_reset_timings(struct whisper_context * ctx);

@@ -226,12 +289,23 @@ extern "C" {
     // Text segment callback
     // Called on every newly generated text segment
     // Use the whisper_full_...() functions to obtain the text segments
-    typedef void (*whisper_new_segment_callback)(struct whisper_context * ctx, int n_new, void * user_data);
+    typedef void (*whisper_new_segment_callback)(struct whisper_context * ctx, struct whisper_state * state, int n_new, void * user_data);

     // Encoder begin callback
     // If not NULL, called before the encoder starts
     // If it returns false, the computation is aborted
-    typedef bool (*whisper_encoder_begin_callback)(struct whisper_context * ctx, void * user_data);
+    typedef bool (*whisper_encoder_begin_callback)(struct whisper_context * ctx, struct whisper_state * state, void * user_data);
+
+    // Logits filter callback
+    // Can be used to modify the logits before sampling
+    // If not NULL, called after applying temperature to logits
+    typedef void (*whisper_logits_filter_callback)(
+            struct whisper_context * ctx,
+            struct whisper_state * state,
+            const whisper_token_data * tokens,
+            int n_tokens,
+            float * logits,
+            void * user_data);

     // Parameters for the whisper_full() function
     // If you chnage the order or add new parameters, make sure to update the default values in whisper.cpp:

@@ -257,6 +331,7 @@ extern "C" {
         float thold_pt;      // timestamp token probability threshold (~0.01)
         float thold_ptsum;   // timestamp token sum probability threshold (~0.01)
         int   max_len;       // max segment length in characters
+        bool  split_on_word; // split on word rather than on token (when used with max_len)
         int   max_tokens;    // max tokens per segment (0 = no limit)

         // [EXPERIMENTAL] speed-up techniques

@@ -274,6 +349,7 @@ extern "C" {

         // common decoding parameters:
         bool suppress_blank; // ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/decoding.py#L89
+        bool suppress_non_speech_tokens; // ref: https://github.com/openai/whisper/blob/7858aa9c08d98f75575035ecd6481f462d66ca27/whisper/tokenizer.py#L224-L253

         float temperature;    // initial decoding temperature, ref: https://ai.stackexchange.com/a/32478
         float max_initial_ts; // ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/decoding.py#L97

@@ -303,11 +379,16 @@ extern "C" {
         // called each time before the encoder starts
         whisper_encoder_begin_callback encoder_begin_callback;
         void * encoder_begin_callback_user_data;
+
+        // called by each decoder to filter obtained logits
+        whisper_logits_filter_callback logits_filter_callback;
+        void * logits_filter_callback_user_data;
     };

     WHISPER_API struct whisper_full_params whisper_full_default_params(enum whisper_sampling_strategy strategy);

     // Run the entire model: PCM -> log mel spectrogram -> encoder -> decoder -> text
+    // Not thread safe for same context
     // Uses the specified decoding strategy to obtain the text.
     WHISPER_API int whisper_full(
             struct whisper_context * ctx,

@@ -315,7 +396,16 @@ extern "C" {
             const float * samples,
             int n_samples);

-    // Split the input audio in chunks and process each chunk separately using whisper_full()
+    WHISPER_API int whisper_full_with_state(
+            struct whisper_context * ctx,
+            struct whisper_state * state,
+            struct whisper_full_params params,
+            const float * samples,
+            int n_samples);
+
+    // Split the input audio in chunks and process each chunk separately using whisper_full_with_state()
+    // Result is stored in the default state of the context
+    // Not thread safe if executed in parallel on the same context.
     // It seems this approach can offer some speedup in some cases.
     // However, the transcription accuracy can be worse at the beginning and end of each chunk.
     WHISPER_API int whisper_full_parallel(

@@ -325,37 +415,56 @@ extern "C" {
             int n_samples,
             int n_processors);

-    // Number of generated text segments.
+    // Number of generated text segments
     // A segment can be a few words, a sentence, or even a paragraph.
-    WHISPER_API int whisper_full_n_segments(struct whisper_context * ctx);
+    WHISPER_API int whisper_full_n_segments           (struct whisper_context * ctx);
+    WHISPER_API int whisper_full_n_segments_from_state(struct whisper_state * state);

-    // Get the start and end time of the specified segment.
-    WHISPER_API int64_t whisper_full_get_segment_t0(struct whisper_context * ctx, int i_segment);
-    WHISPER_API int64_t whisper_full_get_segment_t1(struct whisper_context * ctx, int i_segment);
+    // Language id associated with the context's default state
+    WHISPER_API int whisper_full_lang_id(struct whisper_context * ctx);

-    // Get the text of the specified segment.
-    WHISPER_API const char * whisper_full_get_segment_text(struct whisper_context * ctx, int i_segment);
+    // Language id associated with the provided state
+    WHISPER_API int whisper_full_lang_id_from_state(struct whisper_state * state);

-    // Get number of tokens in the specified segment.
-    WHISPER_API int whisper_full_n_tokens(struct whisper_context * ctx, int i_segment);
+    // Get the start and end time of the specified segment
+    WHISPER_API int64_t whisper_full_get_segment_t0           (struct whisper_context * ctx, int i_segment);
+    WHISPER_API int64_t whisper_full_get_segment_t0_from_state(struct whisper_state * state, int i_segment);

-    // Get the token text of the specified token in the specified segment.
-    WHISPER_API const char * whisper_full_get_token_text(struct whisper_context * ctx, int i_segment, int i_token);
-    WHISPER_API whisper_token whisper_full_get_token_id (struct whisper_context * ctx, int i_segment, int i_token);
+    WHISPER_API int64_t whisper_full_get_segment_t1           (struct whisper_context * ctx, int i_segment);
+    WHISPER_API int64_t whisper_full_get_segment_t1_from_state(struct whisper_state * state, int i_segment);

-    // Get token data for the specified token in the specified segment.
+    // Get the text of the specified segment
+    WHISPER_API const char * whisper_full_get_segment_text           (struct whisper_context * ctx, int i_segment);
+    WHISPER_API const char * whisper_full_get_segment_text_from_state(struct whisper_state * state, int i_segment);
+
+    // Get number of tokens in the specified segment
+    WHISPER_API int whisper_full_n_tokens           (struct whisper_context * ctx, int i_segment);
+    WHISPER_API int whisper_full_n_tokens_from_state(struct whisper_state * state, int i_segment);
+
+    // Get the token text of the specified token in the specified segment
+    WHISPER_API const char * whisper_full_get_token_text           (struct whisper_context * ctx, int i_segment, int i_token);
+    WHISPER_API const char * whisper_full_get_token_text_from_state(struct whisper_context * ctx, struct whisper_state * state, int i_segment, int i_token);
+
+    WHISPER_API whisper_token whisper_full_get_token_id           (struct whisper_context * ctx, int i_segment, int i_token);
+    WHISPER_API whisper_token whisper_full_get_token_id_from_state(struct whisper_state * state, int i_segment, int i_token);
+
+    // Get token data for the specified token in the specified segment
     // This contains probabilities, timestamps, etc.
-    WHISPER_API whisper_token_data whisper_full_get_token_data(struct whisper_context * ctx, int i_segment, int i_token);
+    WHISPER_API whisper_token_data whisper_full_get_token_data           (struct whisper_context * ctx, int i_segment, int i_token);
+    WHISPER_API whisper_token_data whisper_full_get_token_data_from_state(struct whisper_state * state, int i_segment, int i_token);

-    // Get the probability of the specified token in the specified segment.
-    WHISPER_API float whisper_full_get_token_p(struct whisper_context * ctx, int i_segment, int i_token);
+    // Get the probability of the specified token in the specified segment
+    WHISPER_API float whisper_full_get_token_p           (struct whisper_context * ctx, int i_segment, int i_token);
+    WHISPER_API float whisper_full_get_token_p_from_state(struct whisper_state * state, int i_segment, int i_token);

     ////////////////////////////////////////////////////////////////////////////

     // Temporary helpers needed for exposing ggml interface

     WHISPER_API int whisper_bench_memcpy(int n_threads);
+    WHISPER_API const char * whisper_bench_memcpy_str(int n_threads);
     WHISPER_API int whisper_bench_ggml_mul_mat(int n_threads);
+    WHISPER_API const char * whisper_bench_ggml_mul_mat_str(int n_threads);

 #ifdef __cplusplus
 }
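A hedged sketch of how the new *_no_state / *_with_state entry points above fit together: one context created without an internal state, one explicitly allocated state driving a full transcription. Only the declarations come from the diff; the model path, the prepared PCM buffer and the function name are assumptions for illustration.

// Sketch: context without default state + caller-managed whisper_state.
#include <stdio.h>
#include "whisper.h"

int transcribe_with_state(const float * pcmf32, int n_samples) {
    struct whisper_context * ctx = whisper_init_from_file_no_state("models/ggml-base.en.bin");
    if (ctx == NULL) {
        return 1;
    }

    // caller is responsible for allocating (and later freeing) the state
    struct whisper_state * state = whisper_init_state(ctx);

    struct whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);

    if (whisper_full_with_state(ctx, state, wparams, pcmf32, n_samples) != 0) {
        whisper_free_state(state);
        whisper_free(ctx);
        return 1;
    }

    // results are read back through the *_from_state accessors
    const int n_segments = whisper_full_n_segments_from_state(state);
    for (int i = 0; i < n_segments; ++i) {
        printf("[%lld --> %lld] %s\n",
               (long long) whisper_full_get_segment_t0_from_state(state, i),
               (long long) whisper_full_get_segment_t1_from_state(state, i),
               whisper_full_get_segment_text_from_state(state, i));
    }

    whisper_free_state(state);
    whisper_free(ctx);
    return 0;
}

Because each whisper_state is independent, several states can in principle be created from the same context, e.g. one per worker thread, while the model weights are loaded only once.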