mirror of
https://github.com/ggerganov/whisper.cpp.git
synced 2025-07-04 16:30:58 +02:00
Compare commits
255 Commits
Author | SHA1 | Date | |
---|---|---|---|
05ce7476ae | |||
f11de0e73c | |||
d5cc27ee4d | |||
5bb1d58c6a | |||
7d14005717 | |||
4ffb8e3e4d | |||
1d8d8ae55e | |||
eebf6bc0bd | |||
dc8f423b40 | |||
548e7052f1 | |||
a34cb73dc2 | |||
82f9496657 | |||
e3c85e75bd | |||
b9eab73fa2 | |||
76385c8311 | |||
442cd1d2e7 | |||
bc8cb97e02 | |||
8dcadf736b | |||
93986b61e0 | |||
bd1a9e34c9 | |||
cc03608e78 | |||
54a54faee4 | |||
96a92ecc4c | |||
edd1d8686a | |||
dc6f4e7c05 | |||
74c85d154e | |||
eb2d8b6ffd | |||
b442dcd598 | |||
c98681e6d5 | |||
3bab804981 | |||
c927830a70 | |||
992b51b3d5 | |||
2c882cbe4c | |||
1fbb119b1e | |||
40dea850fd | |||
8255a830a8 | |||
a0f76b2da7 | |||
394768c48b | |||
846e01b2c0 | |||
6ac8e6b2ce | |||
60d2ddebdf | |||
2e180184a8 | |||
ef40950c4a | |||
c774eec709 | |||
5b481a27a6 | |||
fc7b1ee521 | |||
c42f67e2d2 | |||
339a1cba5d | |||
c64f3e8ada | |||
9f83f67221 | |||
7d3da68f79 | |||
b5d21359c1 | |||
17addf7104 | |||
cdaee8b4bd | |||
4b60ff4f92 | |||
b43b9d928c | |||
e3cb412a59 | |||
ac301a7d9b | |||
82e04e7670 | |||
38ac47cd4d | |||
2d70cd36d7 | |||
98dab49b9a | |||
b1385e9aa9 | |||
48f5e893f5 | |||
dc21871fcb | |||
64a430bc81 | |||
51a3580c79 | |||
37a21dd43d | |||
8a22a8b17f | |||
fcbcad0c90 | |||
4444db7360 | |||
a7fc1038ca | |||
1689aaf854 | |||
4b48fe449a | |||
47cc043e69 | |||
e3d9ffb98b | |||
e22d69839d | |||
defe731263 | |||
4e07957bf9 | |||
d2c5154bb5 | |||
4fac43fe00 | |||
3be9670f17 | |||
86729fcd6d | |||
7fbca6304e | |||
d597f83e1a | |||
e5edcc6259 | |||
556f773d53 | |||
91d02de332 | |||
1b67d72f87 | |||
14d7c0368d | |||
db6e19188a | |||
b4b063a5c9 | |||
930b739e7a | |||
5981352bb5 | |||
7561da244e | |||
be83f342fb | |||
fd369871f7 | |||
bbd8364f5e | |||
e4102440ef | |||
f8242ec483 | |||
ef51b4cba4 | |||
6f08b24146 | |||
7c165d7fa8 | |||
2f0cf44915 | |||
b9c972fd0d | |||
01c9aafbfd | |||
bae6bbf487 | |||
c310272fa0 | |||
bd0b55dbe0 | |||
ba4645db2c | |||
dfc6ca62f3 | |||
47e14c0529 | |||
d682e15090 | |||
46d07b9c85 | |||
33ea03f131 | |||
dbcc669e1a | |||
16245b35e4 | |||
898c0cb9d1 | |||
eb9e5032c4 | |||
cadfc50eab | |||
3f91832352 | |||
cff8868b5f | |||
90e3c5fc40 | |||
e0f4cef867 | |||
234460987e | |||
b8ab126343 | |||
edc5d9267c | |||
344b98a44f | |||
dbeb7916b8 | |||
fad2806352 | |||
9906792ec3 | |||
c49ee07ff4 | |||
f8a831779e | |||
85451e3612 | |||
43c744ce8b | |||
fc2e44490d | |||
f41fdad200 | |||
80fa576254 | |||
75e7d0585e | |||
682a6f5f87 | |||
115716d109 | |||
b2cfef655b | |||
22e3df0afa | |||
028511d349 | |||
70c4038842 | |||
8639c003a9 | |||
d5d831da65 | |||
7230a6e1c8 | |||
a160fa0f3a | |||
0282ad8fd1 | |||
9e467815d4 | |||
727891d9bf | |||
c262dc80e2 | |||
30767b4c4e | |||
16eeb31933 | |||
ba523d5e22 | |||
3736706139 | |||
58640aa456 | |||
5183a05e56 | |||
0dcada42d4 | |||
d507b4cebe | |||
90171055f3 | |||
668306ff2b | |||
fdc21fc87b | |||
7183a1eb72 | |||
09f3c66648 | |||
62e2414620 | |||
de49024e49 | |||
db6383094c | |||
164f13c6a9 | |||
02aa86230a | |||
54a2ee648f | |||
9700cfb0a3 | |||
8e0143e205 | |||
f12559d590 | |||
589b40810a | |||
7ffcd05267 | |||
7a423f1c00 | |||
99b011a9f5 | |||
19d95f9f9a | |||
d5ef1737d8 | |||
1deb41f0e7 | |||
2425caf4fd | |||
a4b00bcaaf | |||
cdb8aa2f2e | |||
06209f6683 | |||
c3235bd81e | |||
262d0abc87 | |||
124eec1664 | |||
b08c3a88c8 | |||
0afce25a69 | |||
acdbe58631 | |||
09fabffdf5 | |||
3988d6396b | |||
c8c63eeec0 | |||
abf7f24410 | |||
341f5c28e6 | |||
5377099524 | |||
dcbb375779 | |||
4334c71aed | |||
e875a82473 | |||
507e230f1e | |||
eb68324c86 | |||
e940fbf283 | |||
35d0e02c72 | |||
45d3faf961 | |||
2ab2eb5110 | |||
b82d305282 | |||
885e31368d | |||
8a9ad7844d | |||
eb874b3a3c | |||
eb78e3a3f1 | |||
ece3ff88f6 | |||
9366544991 | |||
95583942ed | |||
2e93cb6a2f | |||
de5cd60d1c | |||
3fcba3e58b | |||
cea5f1c52f | |||
2112462db4 | |||
fc84ecd445 | |||
8de1e99907 | |||
499af9294a | |||
bcf937c216 | |||
b8d90953d7 | |||
60a422147b | |||
3387415bad | |||
536ca3ec89 | |||
a4bb983190 | |||
39c205f555 | |||
6d502f33dc | |||
5ea27d089d | |||
1462d92588 | |||
7ba1a41f47 | |||
5ea088636f | |||
f32ddb3b1c | |||
79b75ece03 | |||
6348d73e55 | |||
fb36a1538a | |||
c81b8b910b | |||
85b60f31d0 | |||
227b5ffa36 | |||
36a64a253f | |||
c84b83c370 | |||
5136fd92c2 | |||
7d55637f0b | |||
0994506054 | |||
53c9a3a984 | |||
ed09075ca0 | |||
f07a81aa9f | |||
4183517076 | |||
f4668169a0 | |||
944ce49439 | |||
2e59dced12 | |||
e4e05981d6 |
@ -12,7 +12,7 @@ FROM ${BASE_CUDA_DEV_CONTAINER} as build
|
|||||||
ARG CUDA_DOCKER_ARCH=all
|
ARG CUDA_DOCKER_ARCH=all
|
||||||
|
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
apt-get install -y build-essential git cmake libsdl2-dev wget
|
apt-get install -y build-essential git cmake libsdl2-dev wget git
|
||||||
|
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
|
@ -17,7 +17,7 @@ ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
|
|||||||
ENV GGML_CUDA=1
|
ENV GGML_CUDA=1
|
||||||
|
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
apt-get install -y build-essential libsdl2-dev wget cmake \
|
apt-get install -y build-essential libsdl2-dev wget cmake git \
|
||||||
&& rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
|
&& rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
|
||||||
|
|
||||||
# Ref: https://stackoverflow.com/a/53464012
|
# Ref: https://stackoverflow.com/a/53464012
|
||||||
@ -33,7 +33,7 @@ ENV LD_LIBRARY_PATH /usr/local/cuda-${CUDA_MAIN_VERSION}/compat:$LD_LIBRARY_PATH
|
|||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
apt-get install -y curl ffmpeg wget cmake \
|
apt-get install -y curl ffmpeg wget cmake git \
|
||||||
&& rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
|
&& rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
|
||||||
|
|
||||||
COPY --from=build /app /app
|
COPY --from=build /app /app
|
||||||
|
@ -2,7 +2,7 @@ FROM ubuntu:22.04 AS build
|
|||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
apt-get install -y build-essential wget cmake \
|
apt-get install -y build-essential wget cmake git \
|
||||||
&& rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
|
&& rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
|
||||||
|
|
||||||
COPY .. .
|
COPY .. .
|
||||||
@ -12,7 +12,7 @@ FROM ubuntu:22.04 AS runtime
|
|||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
apt-get install -y curl ffmpeg libsdl2-dev wget cmake \
|
apt-get install -y curl ffmpeg libsdl2-dev wget cmake git \
|
||||||
&& rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
|
&& rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
|
||||||
|
|
||||||
COPY --from=build /app /app
|
COPY --from=build /app /app
|
||||||
|
4
.github/workflows/bindings-go.yml
vendored
4
.github/workflows/bindings-go.yml
vendored
@ -10,8 +10,8 @@ on:
|
|||||||
- whisper.h
|
- whisper.h
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
ubuntu-latest:
|
ubuntu-22:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/setup-go@v5
|
- uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
|
18
.github/workflows/bindings-ruby.yml
vendored
18
.github/workflows/bindings-ruby.yml
vendored
@ -19,7 +19,12 @@ on:
|
|||||||
- ggml/**/*.m
|
- ggml/**/*.m
|
||||||
- ggml/**/*.metal
|
- ggml/**/*.metal
|
||||||
- scripts/get-flags.mk
|
- scripts/get-flags.mk
|
||||||
- examples/dr_wav.h
|
- examples/common.h
|
||||||
|
- examples/common.cpp
|
||||||
|
- examples/common-whisper.h
|
||||||
|
- examples/common-whisper.cpp
|
||||||
|
- examples/stb_vorbis.c
|
||||||
|
- examples/miniaudio.h
|
||||||
pull_request:
|
pull_request:
|
||||||
paths:
|
paths:
|
||||||
- bindings/ruby/**
|
- bindings/ruby/**
|
||||||
@ -39,11 +44,16 @@ on:
|
|||||||
- ggml/**/*.m
|
- ggml/**/*.m
|
||||||
- ggml/**/*.metal
|
- ggml/**/*.metal
|
||||||
- scripts/get-flags.mk
|
- scripts/get-flags.mk
|
||||||
- examples/dr_wav.h
|
- examples/common.h
|
||||||
|
- examples/common.cpp
|
||||||
|
- examples/common-whisper.h
|
||||||
|
- examples/common-whisper.cpp
|
||||||
|
- examples/stb_vorbis.c
|
||||||
|
- examples/miniaudio.h
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
ubuntu-latest:
|
ubuntu-22:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
working-directory: bindings/ruby
|
working-directory: bindings/ruby
|
||||||
|
333
.github/workflows/build.yml
vendored
333
.github/workflows/build.yml
vendored
@ -1,18 +1,28 @@
|
|||||||
name: CI
|
name: CI
|
||||||
on: [push, pull_request]
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
pull_request:
|
||||||
|
types: [opened, synchronize, reopened]
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
env:
|
env:
|
||||||
ubuntu_image: "ubuntu:22.04"
|
ubuntu_image: "ubuntu:22.04"
|
||||||
VCPKG_BINARY_SOURCES: "clear;x-gha,readwrite"
|
VCPKG_BINARY_SOURCES: "clear;x-gha,readwrite"
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
ubuntu-latest:
|
ubuntu-22:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
arch: [linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le]
|
arch: [linux/amd64, linux/ppc64le]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Clone
|
- name: Clone
|
||||||
@ -28,17 +38,80 @@ jobs:
|
|||||||
-w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
|
-w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
|
||||||
set -e
|
set -e
|
||||||
apt update
|
apt update
|
||||||
apt install -y build-essential libsdl2-dev cmake
|
apt install -y build-essential libsdl2-dev cmake git
|
||||||
cmake -B build
|
cmake -B build
|
||||||
cmake --build build --config Release -j $(nproc)'
|
cmake --build build --config Release -j $(nproc)'
|
||||||
|
|
||||||
|
ubuntu-22-arm64:
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
arch: [linux/arm64]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Clone
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Build ${{ matrix.arch }}
|
||||||
|
run: |
|
||||||
|
docker run --platform ${{ matrix.arch }} --rm \
|
||||||
|
-v ${{ github.workspace }}:/workspace \
|
||||||
|
-w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
|
||||||
|
set -e
|
||||||
|
apt update
|
||||||
|
apt install -y build-essential libsdl2-dev cmake git
|
||||||
|
cmake -B build -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv8-a
|
||||||
|
cmake --build build --config Release -j $(nproc)'
|
||||||
|
|
||||||
|
ubuntu-22-arm-v7:
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
arch: [linux/arm/v7]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Clone
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Build ${{ matrix.arch }}
|
||||||
|
run: |
|
||||||
|
docker run --platform ${{ matrix.arch }} --rm \
|
||||||
|
-v ${{ github.workspace }}:/workspace \
|
||||||
|
-w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
|
||||||
|
set -e
|
||||||
|
apt update
|
||||||
|
apt install -y build-essential libsdl2-dev cmake git
|
||||||
|
cmake -B build -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv7-a+fp
|
||||||
|
cmake --build build --config Release -j $(nproc)'
|
||||||
|
|
||||||
macOS-latest:
|
macOS-latest:
|
||||||
runs-on: macOS-latest
|
runs-on: macOS-latest
|
||||||
|
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
destination: ['generic/platform=macOS', 'generic/platform=iOS', 'generic/platform=tvOS']
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Clone
|
- name: Clone
|
||||||
|
id: checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: ccache
|
||||||
|
uses: hendrikmuhs/ccache-action@v1.2.16
|
||||||
|
with:
|
||||||
|
key: macOS-latest-swift
|
||||||
|
evict-old-files: 1d
|
||||||
|
|
||||||
- name: Dependencies
|
- name: Dependencies
|
||||||
run: |
|
run: |
|
||||||
brew update
|
brew update
|
||||||
@ -46,8 +119,21 @@ jobs:
|
|||||||
|
|
||||||
- name: Build
|
- name: Build
|
||||||
run: |
|
run: |
|
||||||
cmake -B build
|
sysctl -a
|
||||||
cmake --build build --config Release
|
cmake -B build -G Xcode \
|
||||||
|
-DGGML_METAL_USE_BF16=ON \
|
||||||
|
-DGGML_METAL_EMBED_LIBRARY=ON \
|
||||||
|
-DWHISPER_BUILD_EXAMPLES=OFF \
|
||||||
|
-DWHISPER_BUILD_TESTS=OFF \
|
||||||
|
-DWHISPER_BUILD_SERVER=OFF \
|
||||||
|
-DCMAKE_OSX_ARCHITECTURES="arm64;x86_64"
|
||||||
|
cmake --build build --config Release -j $(sysctl -n hw.logicalcpu)
|
||||||
|
|
||||||
|
- name: xcodebuild for swift package
|
||||||
|
id: xcodebuild
|
||||||
|
run: |
|
||||||
|
./build-xcframework.sh
|
||||||
|
|
||||||
|
|
||||||
# freeBSD-latest:
|
# freeBSD-latest:
|
||||||
# runs-on: macos-12
|
# runs-on: macos-12
|
||||||
@ -67,14 +153,14 @@ jobs:
|
|||||||
# cmake -B build
|
# cmake -B build
|
||||||
# cmake --build build --config Release
|
# cmake --build build --config Release
|
||||||
|
|
||||||
ubuntu-latest-gcc:
|
ubuntu-22-gcc:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
build: [Debug, Release]
|
build: [Debug, Release]
|
||||||
arch: [linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le]
|
arch: [linux/amd64, linux/ppc64le]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Clone
|
- name: Clone
|
||||||
@ -90,13 +176,69 @@ jobs:
|
|||||||
-w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
|
-w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
|
||||||
set -e
|
set -e
|
||||||
apt update
|
apt update
|
||||||
apt install -y build-essential cmake libsdl2-dev
|
apt install -y build-essential cmake libsdl2-dev git
|
||||||
cmake . -DWHISPER_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }}
|
cmake . -DWHISPER_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }}
|
||||||
make
|
make
|
||||||
ctest -L gh --output-on-failure'
|
ctest -L gh --output-on-failure'
|
||||||
|
|
||||||
ubuntu-latest-clang:
|
ubuntu-22-gcc-arm64:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
build: [Debug, Release]
|
||||||
|
arch: [linux/arm64]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Clone
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Build ${{ matrix.arch }}
|
||||||
|
run: |
|
||||||
|
docker run --platform ${{ matrix.arch }} --rm \
|
||||||
|
-v ${{ github.workspace }}:/workspace \
|
||||||
|
-w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
|
||||||
|
set -e
|
||||||
|
apt update
|
||||||
|
apt install -y build-essential cmake libsdl2-dev git
|
||||||
|
cmake . -DWHISPER_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }} -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv8-a
|
||||||
|
make
|
||||||
|
ctest -L gh --output-on-failure'
|
||||||
|
|
||||||
|
ubuntu-22-gcc-arm-v7:
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
build: [Debug, Release]
|
||||||
|
arch: [linux/arm/v7]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Clone
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Build ${{ matrix.arch }}
|
||||||
|
run: |
|
||||||
|
docker run --platform ${{ matrix.arch }} --rm \
|
||||||
|
-v ${{ github.workspace }}:/workspace \
|
||||||
|
-w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
|
||||||
|
set -e
|
||||||
|
apt update
|
||||||
|
apt install -y build-essential cmake libsdl2-dev git
|
||||||
|
cmake . -DWHISPER_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }} -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv7-a+fp
|
||||||
|
make
|
||||||
|
ctest -L gh --output-on-failure'
|
||||||
|
|
||||||
|
ubuntu-22-clang:
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
@ -121,13 +263,13 @@ jobs:
|
|||||||
-w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
|
-w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
|
||||||
set -e
|
set -e
|
||||||
apt update
|
apt update
|
||||||
apt install -y clang build-essential cmake libsdl2-dev
|
apt install -y clang build-essential cmake libsdl2-dev git
|
||||||
cmake . -DWHISPER_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }} -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang
|
cmake . -DWHISPER_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }} -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang
|
||||||
make
|
make
|
||||||
ctest -L gh --output-on-failure'
|
ctest -L gh --output-on-failure'
|
||||||
|
|
||||||
ubuntu-latest-gcc-sanitized:
|
ubuntu-22-gcc-sanitized:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
@ -149,7 +291,7 @@ jobs:
|
|||||||
-w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
|
-w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
|
||||||
set -e
|
set -e
|
||||||
apt update
|
apt update
|
||||||
apt install -y build-essential cmake
|
apt install -y build-essential cmake git
|
||||||
cmake . -DCMAKE_BUILD_TYPE=Debug -DWHISPER_SANITIZE_${{ matrix.sanitizer }}=ON
|
cmake . -DCMAKE_BUILD_TYPE=Debug -DWHISPER_SANITIZE_${{ matrix.sanitizer }}=ON
|
||||||
make
|
make
|
||||||
ctest -L gh --output-on-failure'
|
ctest -L gh --output-on-failure'
|
||||||
@ -184,12 +326,12 @@ jobs:
|
|||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
sudo apt update
|
sudo apt update
|
||||||
sudo apt install intel-oneapi-compiler-dpcpp-cpp
|
sudo apt install intel-oneapi-compiler-dpcpp-cpp git
|
||||||
|
|
||||||
- name: install oneAPI MKL library
|
- name: install oneAPI MKL library
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
sudo apt install intel-oneapi-mkl-devel
|
sudo apt install intel-oneapi-mkl-devel git
|
||||||
|
|
||||||
- name: Clone
|
- name: Clone
|
||||||
id: checkout
|
id: checkout
|
||||||
@ -234,7 +376,7 @@ jobs:
|
|||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
sudo apt update
|
sudo apt update
|
||||||
sudo apt install intel-oneapi-compiler-dpcpp-cpp
|
sudo apt install intel-oneapi-compiler-dpcpp-cpp git
|
||||||
|
|
||||||
- name: install oneAPI MKL library
|
- name: install oneAPI MKL library
|
||||||
shell: bash
|
shell: bash
|
||||||
@ -275,6 +417,7 @@ jobs:
|
|||||||
msystem: ${{matrix.sys}}
|
msystem: ${{matrix.sys}}
|
||||||
install: >-
|
install: >-
|
||||||
base-devel
|
base-devel
|
||||||
|
git
|
||||||
mingw-w64-${{matrix.env}}-toolchain
|
mingw-w64-${{matrix.env}}-toolchain
|
||||||
mingw-w64-${{matrix.env}}-cmake
|
mingw-w64-${{matrix.env}}-cmake
|
||||||
mingw-w64-${{matrix.env}}-SDL2
|
mingw-w64-${{matrix.env}}-SDL2
|
||||||
@ -430,75 +573,76 @@ jobs:
|
|||||||
name: whisper-blas-bin-${{ matrix.arch }}
|
name: whisper-blas-bin-${{ matrix.arch }}
|
||||||
path: build/bin/${{ matrix.build }}
|
path: build/bin/${{ matrix.build }}
|
||||||
|
|
||||||
# TODO: fix and re-enable
|
windows-cublas:
|
||||||
# windows-cublas:
|
runs-on: windows-2019
|
||||||
# runs-on: windows-2019
|
strategy:
|
||||||
#
|
matrix:
|
||||||
# strategy:
|
build: [Release]
|
||||||
# matrix:
|
arch: [x64]
|
||||||
# build: [Release]
|
cublas: [ON]
|
||||||
# arch: [x64]
|
sdl2: [ON]
|
||||||
# cublas: [ON]
|
cuda-toolkit: [12.2.0, 11.8.0]
|
||||||
# sdl2: [ON]
|
include:
|
||||||
# cuda-toolkit: [12.2.0, 11.8.0]
|
- arch: x64
|
||||||
# include:
|
sdl2: ON
|
||||||
# - arch: x64
|
sdl2_ver: 2.28.5
|
||||||
# s2arc: x64
|
steps:
|
||||||
# - sdl2: ON
|
- name: Clone repository
|
||||||
# s2ver: 2.28.5
|
uses: actions/checkout@v4
|
||||||
#
|
|
||||||
# steps:
|
- name: Add msbuild to PATH
|
||||||
# - name: Clone
|
uses: microsoft/setup-msbuild@v2
|
||||||
# uses: actions/checkout@v4
|
|
||||||
#
|
- name: Install CUDA Toolkit
|
||||||
# - name: Add msbuild to PATH
|
id: cuda-toolkit
|
||||||
# uses: microsoft/setup-msbuild@v2
|
uses: Jimver/cuda-toolkit@v0.2.15
|
||||||
#
|
with:
|
||||||
# - name: Install CUDA Toolkit
|
cuda: '${{ matrix.cuda-toolkit }}'
|
||||||
# id: cuda-toolkit
|
|
||||||
# uses: Jimver/cuda-toolkit@v0.2.15
|
- name: Install 7-Zip
|
||||||
# with:
|
run: choco install 7zip -y
|
||||||
# cuda: '${{ matrix.cuda-toolkit }}'
|
|
||||||
#
|
- name: Fetch SDL2 and set SDL2_DIR
|
||||||
# - name: Fetch SDL2 and set SDL2_DIR
|
if: matrix.sdl2 == 'ON'
|
||||||
# if: matrix.sdl2 == 'ON'
|
run: |
|
||||||
# run: |
|
Invoke-WebRequest -Uri https://github.com/libsdl-org/SDL/releases/download/release-${{ matrix.sdl2_ver }}/SDL2-devel-${{ matrix.sdl2_ver }}-VC.zip -OutFile sdl2.zip
|
||||||
# C:/msys64/usr/bin/wget.exe -qO sdl2.zip https://github.com/libsdl-org/SDL/releases/download/release-${{ matrix.s2ver }}/SDL2-devel-${{ matrix.s2ver }}-VC.zip
|
7z x sdl2.zip
|
||||||
# 7z x sdl2.zip
|
echo "SDL2_DIR=${{ github.workspace }}\SDL2-${{ matrix.sdl2_ver }}\cmake" | Out-File -FilePath $env:GITHUB_ENV -Append
|
||||||
# echo "SDL2_DIR=$env:GITHUB_WORKSPACE/SDL2-${{ matrix.s2ver }}/cmake" >> $env:GITHUB_ENV
|
echo "${{ github.workspace }}\SDL2-${{ matrix.sdl2_ver }}\cmake" > SDL2_PATH.txt
|
||||||
#
|
|
||||||
# - name: Configure
|
- name: Configure CMake
|
||||||
# run: >
|
shell: cmd
|
||||||
# cmake -S . -B ./build -A ${{ matrix.arch }}
|
run: |
|
||||||
# -DCMAKE_BUILD_TYPE=${{ matrix.build }}
|
cmake -S . -B ./build -A ${{ matrix.arch }} ^
|
||||||
# -DGGML_CUDA=${{ matrix.cublas }}
|
-DCMAKE_BUILD_TYPE=${{ matrix.build }} ^
|
||||||
# -DWHISPER_SDL2=${{ matrix.sdl2 }}
|
-DGGML_CUDA=${{ matrix.cublas }} ^
|
||||||
#
|
-DCMAKE_CUDA_ARCHITECTURES=all ^
|
||||||
# - name: Build ${{ matrix.cuda-toolkit }}
|
-DWHISPER_SDL2=${{ matrix.sdl2 }} ^
|
||||||
# run: |
|
-DSDL2_DIR="%SDL2_DIR%"
|
||||||
# cd ./build
|
|
||||||
# cmake --build . --config ${{ matrix.build }}
|
- name: Build Project
|
||||||
#
|
shell: cmd
|
||||||
# - name: Copy CUDA DLLs
|
run: |
|
||||||
# run: >
|
cd ./build
|
||||||
# Copy-Item -PassThru
|
cmake --build . --config ${{ matrix.build }}
|
||||||
# -Path "${{ steps.cuda-toolkit.outputs.CUDA_PATH }}/bin/*.dll"
|
|
||||||
# -Include cudart64_*,cublas64_*,cublasLt64_*
|
- name: Copy CUDA DLLs
|
||||||
# -Destination build/bin/${{ matrix.build }}
|
run: |
|
||||||
#
|
Get-ChildItem "${{ steps.cuda-toolkit.outputs.CUDA_PATH }}/bin/" -Filter "*.dll" |
|
||||||
# - name: Copy SDL2.dll
|
Copy-Item -Destination "build/bin/${{ matrix.build }}"
|
||||||
# if: matrix.sdl2 == 'ON'
|
|
||||||
# run: copy "$env:SDL2_DIR/../lib/${{ matrix.s2arc }}/SDL2.dll" build/bin/${{ matrix.build }}
|
- name: Copy SDL2.dll
|
||||||
#
|
if: matrix.sdl2 == 'ON'
|
||||||
# - name: Upload binaries
|
run: copy "$env:SDL2_DIR/../lib/${{ matrix.arch }}/SDL2.dll" build/bin/${{ matrix.build }}
|
||||||
# if: matrix.sdl2 == 'ON'
|
|
||||||
# uses: actions/upload-artifact@v4
|
- name: Upload binaries
|
||||||
# with:
|
uses: actions/upload-artifact@v4
|
||||||
# name: whisper-cublas-${{ matrix.cuda-toolkit }}-bin-${{ matrix.arch }}
|
with:
|
||||||
# path: build/bin/${{ matrix.build }}
|
name: whisper-cublas-${{ matrix.cuda-toolkit }}-bin-${{ matrix.arch }}
|
||||||
|
path: build/bin/${{ matrix.build }}
|
||||||
|
|
||||||
emscripten:
|
emscripten:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
@ -551,21 +695,20 @@ jobs:
|
|||||||
-DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \
|
-DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \
|
||||||
-DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml
|
-DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml
|
||||||
cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO
|
cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO
|
||||||
sudo cmake --install . --config Release
|
|
||||||
|
|
||||||
- name: xcodebuild for swift package
|
- name: xcodebuild for swift package
|
||||||
id: xcodebuild
|
id: xcodebuild
|
||||||
run: |
|
run: |
|
||||||
xcodebuild -scheme whisper-Package -destination 'generic/platform=iOS'
|
./build-xcframework.sh
|
||||||
|
|
||||||
#- name: Build objc example
|
- name: Build objc example
|
||||||
# run: xcodebuild -project examples/whisper.objc/whisper.objc.xcodeproj -scheme whisper.objc -configuration ${{ matrix.build }} -sdk iphoneos build
|
run: xcodebuild -project examples/whisper.objc/whisper.objc.xcodeproj -scheme whisper.objc -configuration ${{ matrix.build }} -sdk iphoneos CODE_SIGN_IDENTITY="" CODE_SIGNING_REQUIRED=NO FRAMEWORK_FOLDER_PATH=./build-ios build
|
||||||
|
|
||||||
- name: Build swiftui example
|
- name: Build swiftui example
|
||||||
run: xcodebuild -project examples/whisper.swiftui/whisper.swiftui.xcodeproj -scheme WhisperCppDemo -configuration ${{ matrix.build }} -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' build
|
run: xcodebuild -project examples/whisper.swiftui/whisper.swiftui.xcodeproj -scheme WhisperCppDemo -configuration ${{ matrix.build }} -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' FRAMEWORK_FOLDER_PATH=./build-ios build
|
||||||
|
|
||||||
android:
|
android:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Clone
|
- name: Clone
|
||||||
@ -595,7 +738,7 @@ jobs:
|
|||||||
|
|
||||||
# TODO: disable because of following fail: https://github.com/ggerganov/whisper.cpp/actions/runs/11019444420/job/30627193602
|
# TODO: disable because of following fail: https://github.com/ggerganov/whisper.cpp/actions/runs/11019444420/job/30627193602
|
||||||
# android_java:
|
# android_java:
|
||||||
# runs-on: ubuntu-latest
|
# runs-on: ubuntu-22.04
|
||||||
#
|
#
|
||||||
# steps:
|
# steps:
|
||||||
# - name: Clone
|
# - name: Clone
|
||||||
@ -664,7 +807,7 @@ jobs:
|
|||||||
# PGP_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
|
# PGP_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
|
||||||
|
|
||||||
quantize:
|
quantize:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Clone
|
- name: Clone
|
||||||
|
6
.github/workflows/docker.yml
vendored
6
.github/workflows/docker.yml
vendored
@ -11,13 +11,13 @@ jobs:
|
|||||||
name: Push Docker image to Docker Hub
|
name: Push Docker image to Docker Hub
|
||||||
if: github.event.pull_request.draft == false
|
if: github.event.pull_request.draft == false
|
||||||
|
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
env:
|
env:
|
||||||
COMMIT_SHA: ${{ github.sha }}
|
COMMIT_SHA: ${{ github.sha }}
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
config:
|
config:
|
||||||
- { tag: "main", dockerfile: ".devops/main.Dockerfile", platform: "linux/amd64,linux/arm64" }
|
- { tag: "main", dockerfile: ".devops/main.Dockerfile", platform: "linux/amd64" }
|
||||||
#TODO: the cuda image keeps failing - disable for now
|
#TODO: the cuda image keeps failing - disable for now
|
||||||
# https://github.com/ggerganov/whisper.cpp/actions/runs/11019444428/job/30602020339
|
# https://github.com/ggerganov/whisper.cpp/actions/runs/11019444428/job/30602020339
|
||||||
#- { tag: "main-cuda", dockerfile: ".devops/main-cuda.Dockerfile", platform: "linux/amd64" }
|
#- { tag: "main-cuda", dockerfile: ".devops/main-cuda.Dockerfile", platform: "linux/amd64" }
|
||||||
@ -28,6 +28,8 @@ jobs:
|
|||||||
|
|
||||||
- name: Set up QEMU
|
- name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v3
|
uses: docker/setup-qemu-action@v3
|
||||||
|
with:
|
||||||
|
image: tonistiigi/binfmt:qemu-v7.0.0-28
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: docker/setup-buildx-action@v3
|
||||||
|
6
.github/workflows/examples.yml
vendored
6
.github/workflows/examples.yml
vendored
@ -10,8 +10,8 @@ on:
|
|||||||
- whisper.h
|
- whisper.h
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
addon_node-ubuntu-latest:
|
addon_node-ubuntu-22:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-22.04
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
node-version: [ 16.x, 18.x ]
|
node-version: [ 16.x, 18.x ]
|
||||||
@ -22,7 +22,7 @@ jobs:
|
|||||||
- name: Dependencies
|
- name: Dependencies
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update
|
sudo apt-get update
|
||||||
sudo apt-get install build-essential
|
sudo apt-get install build-essential git
|
||||||
sudo apt-get install cmake
|
sudo apt-get install cmake
|
||||||
sudo apt-get install libsdl2-dev
|
sudo apt-get install libsdl2-dev
|
||||||
|
|
||||||
|
2
.gitignore
vendored
2
.gitignore
vendored
@ -58,3 +58,5 @@ cmake-build-debug/
|
|||||||
.cxx/
|
.cxx/
|
||||||
.gradle/
|
.gradle/
|
||||||
local.properties
|
local.properties
|
||||||
|
.log
|
||||||
|
.exe
|
211
AUTHORS
211
AUTHORS
@ -1,34 +1,51 @@
|
|||||||
# date: Tue Apr 9 20:27:03 EEST 2024
|
# date: Tue Feb 4 13:03:35 EET 2025
|
||||||
# this file is auto-generated by scripts/gen-authors.sh
|
# this file is auto-generated by scripts/gen-authors.sh
|
||||||
|
|
||||||
0/0 <zero@imaskeleton.me>
|
0/0 <zero@imaskeleton.me>
|
||||||
0cc4m <picard12@live.de>
|
0cc4m <picard12@live.de>
|
||||||
0xsourcecode <134374803+0xsourcecode@users.noreply.github.com>
|
0xsourcecode <134374803+0xsourcecode@users.noreply.github.com>
|
||||||
|
65a <10104049+65a@users.noreply.github.com>
|
||||||
|
AIWintermuteAI <32562299+AIWintermuteAI@users.noreply.github.com>
|
||||||
AT <manyoso@users.noreply.github.com>
|
AT <manyoso@users.noreply.github.com>
|
||||||
Aarni Koskela <akx@iki.fi>
|
Aarni Koskela <akx@iki.fi>
|
||||||
Aaron Pham <29749331+aarnphm@users.noreply.github.com>
|
Aaron Pham <29749331+aarnphm@users.noreply.github.com>
|
||||||
Aaron Taylor <aaron@exphat.com>
|
Aaron Taylor <aaron@exphat.com>
|
||||||
Abhilash Majumder <30946547+abhilash1910@users.noreply.github.com>
|
Abhilash Majumder <30946547+abhilash1910@users.noreply.github.com>
|
||||||
Abitofevrything <54505189+abitofevrything@users.noreply.github.com>
|
Abitofevrything <54505189+abitofevrything@users.noreply.github.com>
|
||||||
|
Adam Jones <domdomegg+git@gmail.com>
|
||||||
|
Adrien Gallouët <adrien@gallouet.fr>
|
||||||
|
Adrien Gallouët <angt@huggingface.co>
|
||||||
AfryMask <AfryMask@163.com>
|
AfryMask <AfryMask@163.com>
|
||||||
Ahmad Bilal <ahmad.bilal@empglabs.com>
|
Ahmad Bilal <ahmad.bilal@empglabs.com>
|
||||||
|
Ahmad Tameem <113388789+Tameem-10xE@users.noreply.github.com>
|
||||||
AidanBeltonS <87009434+AidanBeltonS@users.noreply.github.com>
|
AidanBeltonS <87009434+AidanBeltonS@users.noreply.github.com>
|
||||||
|
AidanBeltonS <aidan.belton@codeplay.com>
|
||||||
|
Akarshan Biswas <akarshan.biswas@gmail.com>
|
||||||
|
Akarshan Biswas <akarshanbiswas@fedoraproject.org>
|
||||||
Akash Mahajan <akash7190@gmail.com>
|
Akash Mahajan <akash7190@gmail.com>
|
||||||
Akash Mahajan <akashmjn@stanford.edu>
|
Akash Mahajan <akashmjn@stanford.edu>
|
||||||
Al Hoang <3811822-hoanga@users.noreply.gitlab.com>
|
Al Hoang <3811822-hoanga@users.noreply.gitlab.com>
|
||||||
Alan <unknown>
|
Alan <unknown>
|
||||||
|
Albert Jin <albert.jin@gmail.com>
|
||||||
|
Alberto Cabrera Pérez <alberto.cabrera@codeplay.com>
|
||||||
|
Alberto Cabrera Pérez <alberto.cabrera@intel.com>
|
||||||
Aleksander Andrzejewski <18704749+aleksanderandrzejewski@users.noreply.github.com>
|
Aleksander Andrzejewski <18704749+aleksanderandrzejewski@users.noreply.github.com>
|
||||||
Alex Azarov <alex@azarov.by>
|
Alex Azarov <alex@azarov.by>
|
||||||
Alex Bacart <13940752+alex-bacart@users.noreply.github.com>
|
Alex Bacart <13940752+alex-bacart@users.noreply.github.com>
|
||||||
Alex Evgrashin <aevgrashin@yandex.ru>
|
Alex Evgrashin <aevgrashin@yandex.ru>
|
||||||
|
Alex O'Connell <35843486+acon96@users.noreply.github.com>
|
||||||
Alexandr Graschenkov <alexandr.graschenkov91@gmail.com>
|
Alexandr Graschenkov <alexandr.graschenkov91@gmail.com>
|
||||||
Alexandru Mariuti <alex@mariuti.com>
|
Alexandru Mariuti <alex@mariuti.com>
|
||||||
Alexey Kharlamov <alexey@kharlamov.biz>
|
Alexey Kharlamov <alexey@kharlamov.biz>
|
||||||
Alfredo Montesinos <alfredo.montesinos@g.austincc.edu>
|
Alfredo Montesinos <alfredo.montesinos@g.austincc.edu>
|
||||||
Ali Alameh <ali.alameh@isae.edu.lb>
|
Ali Alameh <ali.alameh@isae.edu.lb>
|
||||||
|
Alter <0x7c48@gmail.com>
|
||||||
Ananta Bastola <anantarajbastola@gmail.com>
|
Ananta Bastola <anantarajbastola@gmail.com>
|
||||||
|
Andreas Kieslinger <47689530+aendk@users.noreply.github.com>
|
||||||
|
Andreas Lubbe <git@lubbe.org>
|
||||||
Andreu Huguet <andreuhuguet@gmail.com>
|
Andreu Huguet <andreuhuguet@gmail.com>
|
||||||
Andrew Huynh <a5thuynh@gmail.com>
|
Andrew Huynh <a5thuynh@gmail.com>
|
||||||
|
Andrew Minh Nguyen <40281306+amqdn@users.noreply.github.com>
|
||||||
Andrew S <andrews54757@gmail.com>
|
Andrew S <andrews54757@gmail.com>
|
||||||
Andy Maloney <asmaloney@gmail.com>
|
Andy Maloney <asmaloney@gmail.com>
|
||||||
Anton Kostin <masguit42@users.noreply.github.com>
|
Anton Kostin <masguit42@users.noreply.github.com>
|
||||||
@ -40,8 +57,11 @@ AustinMroz <austinmroz@utexas.edu>
|
|||||||
Avik Sengupta <avik@sengupta.net>
|
Avik Sengupta <avik@sengupta.net>
|
||||||
Bader-eddine Ouaich <49657842+baderouaich@users.noreply.github.com>
|
Bader-eddine Ouaich <49657842+baderouaich@users.noreply.github.com>
|
||||||
Baffin Lee <baffinlee@gmail.com>
|
Baffin Lee <baffinlee@gmail.com>
|
||||||
|
Ben Ashbaugh <ben.ashbaugh@intel.com>
|
||||||
Ben Nortier <bjnortier@gmail.com>
|
Ben Nortier <bjnortier@gmail.com>
|
||||||
Benjamin Heiniger <benjamin.heiniger@bluewin.ch>
|
Benjamin Heiniger <benjamin.heiniger@bluewin.ch>
|
||||||
|
Bernhard M. Wiedemann <githubbmwprimary@lsmod.de>
|
||||||
|
Binozo <70137898+Binozo@users.noreply.github.com>
|
||||||
Bo-Yi Wu <appleboy.tw@gmail.com>
|
Bo-Yi Wu <appleboy.tw@gmail.com>
|
||||||
Boris Bliznioukov <blib@mail.com>
|
Boris Bliznioukov <blib@mail.com>
|
||||||
Borislav Stanimirov <b.stanimirov@abv.bg>
|
Borislav Stanimirov <b.stanimirov@abv.bg>
|
||||||
@ -49,47 +69,86 @@ Brad Murray <59848399+bradmurray-dt@users.noreply.github.com>
|
|||||||
Brian Murray <brian@bmurray.ca>
|
Brian Murray <brian@bmurray.ca>
|
||||||
CRD716 <crd716@gmail.com>
|
CRD716 <crd716@gmail.com>
|
||||||
Canis Lupus <Canis-UK@users.noreply.github.com>
|
Canis Lupus <Canis-UK@users.noreply.github.com>
|
||||||
|
Carlos Zoido <mrgalleta@gmail.com>
|
||||||
Carolinabanana <140120812+Carolinabanana@users.noreply.github.com>
|
Carolinabanana <140120812+Carolinabanana@users.noreply.github.com>
|
||||||
|
CarterLi999 <664681047@qq.com>
|
||||||
ChangSeok Oh <shivamidow@users.noreply.github.com>
|
ChangSeok Oh <shivamidow@users.noreply.github.com>
|
||||||
|
Changyeon Kim <cyzero.kim@samsung.com>
|
||||||
Chaoqun <27287694+OpenWaygate@users.noreply.github.com>
|
Chaoqun <27287694+OpenWaygate@users.noreply.github.com>
|
||||||
|
Charles Xu <63788048+chaxu01@users.noreply.github.com>
|
||||||
|
Charles Xu <charles.xu@arm.com>
|
||||||
|
Chen Xi <xi2.chen@intel.com>
|
||||||
|
Chen Xi <xixichen08@foxmail.com>
|
||||||
|
Chenguang Li <87689256+noemotiovon@users.noreply.github.com>
|
||||||
Chia-Hsiang Cheng <88014292+garychia@users.noreply.github.com>
|
Chia-Hsiang Cheng <88014292+garychia@users.noreply.github.com>
|
||||||
Chidi Williams <williamschidi1@gmail.com>
|
Chidi Williams <williamschidi1@gmail.com>
|
||||||
|
Chris Elrod <elrodc@gmail.com>
|
||||||
Christian <12550267+iceychris@users.noreply.github.com>
|
Christian <12550267+iceychris@users.noreply.github.com>
|
||||||
|
Christian Kastner <ckk@kvr.at>
|
||||||
Clifford Heath <clifford.heath@gmail.com>
|
Clifford Heath <clifford.heath@gmail.com>
|
||||||
|
Clint Herron <hanclinto@gmail.com>
|
||||||
Colin <github@whoisc.cc>
|
Colin <github@whoisc.cc>
|
||||||
|
Conrad Kramer <conrad@conradkramer.com>
|
||||||
|
Corey Earwood <iamcgn+github@gmail.com>
|
||||||
|
CrispStrobe <154636388+CrispStrobe@users.noreply.github.com>
|
||||||
|
DAN™ <dranger003@gmail.com>
|
||||||
DGdev91 <DGdev91@users.noreply.github.com>
|
DGdev91 <DGdev91@users.noreply.github.com>
|
||||||
Damian Czaja <trojan295@protonmail.com>
|
Damian Czaja <trojan295@protonmail.com>
|
||||||
|
Dan Johansson <164997844+eddnjjn@users.noreply.github.com>
|
||||||
|
Dan Johansson <dan.johansson@arm.com>
|
||||||
Daniel Bevenius <daniel.bevenius@gmail.com>
|
Daniel Bevenius <daniel.bevenius@gmail.com>
|
||||||
|
Daniel Valdivia <18384552+dvaldivia@users.noreply.github.com>
|
||||||
|
Daniel Ziegenberg <daniel@ziegenberg.at>
|
||||||
|
Daniele <57776841+daniandtheweb@users.noreply.github.com>
|
||||||
|
Dave <dave-fl@users.noreply.github.com>
|
||||||
|
Dave Airlie <airlied@gmail.com>
|
||||||
|
Dave Airlie <airlied@redhat.com>
|
||||||
|
Daven Sanassy <daven@vochlea.co.uk>
|
||||||
David <dnhkng@gmail.com>
|
David <dnhkng@gmail.com>
|
||||||
David Thorpe <djt@mutablelogic.com>
|
David Thorpe <djt@mutablelogic.com>
|
||||||
|
DavidKorczynski <david@adalogics.com>
|
||||||
Davidson Francis <davidsondfgl@gmail.com>
|
Davidson Francis <davidsondfgl@gmail.com>
|
||||||
Dener Stassun <denerstassun@gmail.com>
|
Dener Stassun <denerstassun@gmail.com>
|
||||||
|
Dibakar Gope <dibakar.gope@arm.com>
|
||||||
Didzis Gosko <didzis@users.noreply.github.com>
|
Didzis Gosko <didzis@users.noreply.github.com>
|
||||||
|
Diego Devesa <slarengh@gmail.com>
|
||||||
Digipom <admin@digipom.com>
|
Digipom <admin@digipom.com>
|
||||||
Dimo <dimo@ieee.org>
|
Dimo <dimo@ieee.org>
|
||||||
|
Djip007 <3705339+Djip007@users.noreply.github.com>
|
||||||
|
Djip007 <djip.perois@free.fr>
|
||||||
Dody Suria Wijaya <dodysw@gmail.com>
|
Dody Suria Wijaya <dodysw@gmail.com>
|
||||||
|
Dou Xinpeng <15529241576@163.com>
|
||||||
|
Dou Xinpeng <81913537+Dou-Git@users.noreply.github.com>
|
||||||
Dr. Tom Murphy VII Ph.D <499244+tom7@users.noreply.github.com>
|
Dr. Tom Murphy VII Ph.D <499244+tom7@users.noreply.github.com>
|
||||||
Duncan McConnell <ddmcconnell4@gmail.com>
|
Duncan McConnell <ddmcconnell4@gmail.com>
|
||||||
Egor Egorov <me@egorfine.com>
|
Egor Egorov <me@egorfine.com>
|
||||||
Elkana Bardugo <ttv200@gmail.com>
|
Elkana Bardugo <ttv200@gmail.com>
|
||||||
Emmanuel Schmidbauer <eschmidbauer@gmail.com>
|
Emmanuel Schmidbauer <eschmidbauer@gmail.com>
|
||||||
Engininja2 <139037756+Engininja2@users.noreply.github.com>
|
Engininja2 <139037756+Engininja2@users.noreply.github.com>
|
||||||
|
Eric Curtin <ericcurtin17@gmail.com>
|
||||||
Eric Swanson <eswanson@alloscomp.com>
|
Eric Swanson <eswanson@alloscomp.com>
|
||||||
Eric Tendian <erictendian@gmail.com>
|
Eric Tendian <erictendian@gmail.com>
|
||||||
|
Eric Zhang <34133756+EZForever@users.noreply.github.com>
|
||||||
Erik Scholz <Green-Sky@users.noreply.github.com>
|
Erik Scholz <Green-Sky@users.noreply.github.com>
|
||||||
Evan Jones <evan.q.jones@gmail.com>
|
Evan Jones <evan.q.jones@gmail.com>
|
||||||
Evan Martin <evan.martin@gmail.com>
|
Evan Martin <evan.martin@gmail.com>
|
||||||
Eve <139727413+netrunnereve@users.noreply.github.com>
|
Eve <139727413+netrunnereve@users.noreply.github.com>
|
||||||
Evgeny Kuznetsov <evgeny@kuznetsov.md>
|
Evgeny Kuznetsov <evgeny@kuznetsov.md>
|
||||||
F1L1P <78918286+F1L1Pv2@users.noreply.github.com>
|
F1L1P <78918286+F1L1Pv2@users.noreply.github.com>
|
||||||
|
Faisal Zaghloul <quic_fzaghlou@quicinc.com>
|
||||||
Fangjun Kuang <csukuangfj@gmail.com>
|
Fangjun Kuang <csukuangfj@gmail.com>
|
||||||
Felix <stenbackfelix@gmail.com>
|
Felix <stenbackfelix@gmail.com>
|
||||||
Finn Voorhees <finnvoorhees@gmail.com>
|
Finn Voorhees <finnvoorhees@gmail.com>
|
||||||
|
FirstTimeEZ <179362031+FirstTimeEZ@users.noreply.github.com>
|
||||||
FlippFuzz <41221030+FlippFuzz@users.noreply.github.com>
|
FlippFuzz <41221030+FlippFuzz@users.noreply.github.com>
|
||||||
|
Frankie Robertson <frankier@users.noreply.github.com>
|
||||||
Gang Chen <goncha@gmail.com>
|
Gang Chen <goncha@gmail.com>
|
||||||
Gavin Cai <gavin1818@hotmail.com>
|
Gavin Cai <gavin1818@hotmail.com>
|
||||||
George Hindle <george@georgehindle.com>
|
George Hindle <george@georgehindle.com>
|
||||||
Georgi Gerganov <ggerganov@gmail.com>
|
Georgi Gerganov <ggerganov@gmail.com>
|
||||||
|
Gilad S <7817232+giladgd@users.noreply.github.com>
|
||||||
|
Gilad S <giladgd@users.noreply.github.com>
|
||||||
|
Gilad S. <7817232+giladgd@users.noreply.github.com>
|
||||||
GitAritron <103900385+GitAritron@users.noreply.github.com>
|
GitAritron <103900385+GitAritron@users.noreply.github.com>
|
||||||
GiviMAD <GiviMAD@users.noreply.github.com>
|
GiviMAD <GiviMAD@users.noreply.github.com>
|
||||||
Gleicon Moraes <gleicon@gmail.com>
|
Gleicon Moraes <gleicon@gmail.com>
|
||||||
@ -98,41 +157,66 @@ Guillaume Wenzek <gwenzek@users.noreply.github.com>
|
|||||||
HY. Kelvin Lee <34256578+hykelvinlee42@users.noreply.github.com>
|
HY. Kelvin Lee <34256578+hykelvinlee42@users.noreply.github.com>
|
||||||
Halalaluyafail3 <55773281+Halalaluyafail3@users.noreply.github.com>
|
Halalaluyafail3 <55773281+Halalaluyafail3@users.noreply.github.com>
|
||||||
Hang <bebound@gmail.com>
|
Hang <bebound@gmail.com>
|
||||||
|
Haus1 <haus.xda@gmail.com>
|
||||||
Herman Semenov <GermanAizek@yandex.ru>
|
Herman Semenov <GermanAizek@yandex.ru>
|
||||||
|
HimariO <dsfhe49854@gmail.com>
|
||||||
|
Hong Bo PENG <penghb@cn.ibm.com>
|
||||||
Hrishikesh Barman <geekodour@users.noreply.github.com>
|
Hrishikesh Barman <geekodour@users.noreply.github.com>
|
||||||
|
Hugo <hugo@whynothugo.nl>
|
||||||
Ian Bicking <ian@ianbicking.org>
|
Ian Bicking <ian@ianbicking.org>
|
||||||
Ian Bull <irbull@eclipsesource.com>
|
Ian Bull <irbull@eclipsesource.com>
|
||||||
|
Ihar Hrachyshka <ihrachys@redhat.com>
|
||||||
Ikko Ashimine <eltociear@gmail.com>
|
Ikko Ashimine <eltociear@gmail.com>
|
||||||
|
Ikko Eltociear Ashimine <eltociear@gmail.com>
|
||||||
InconsolableCellist <23345188+InconsolableCellist@users.noreply.github.com>
|
InconsolableCellist <23345188+InconsolableCellist@users.noreply.github.com>
|
||||||
Ismatulla Mansurov <47342870+sapoepsilon@users.noreply.github.com>
|
Ismatulla Mansurov <47342870+sapoepsilon@users.noreply.github.com>
|
||||||
|
Ivan <nekotekina@gmail.com>
|
||||||
|
Ivan Filipov <159561759+vanaka11@users.noreply.github.com>
|
||||||
Ivan Gorin <ivangorin21@gmail.com>
|
Ivan Gorin <ivangorin21@gmail.com>
|
||||||
|
Ivo von Putzer Reibegg <ivo.putzer@gmail.com>
|
||||||
JJ <103335846+computerscienceiscool@users.noreply.github.com>
|
JJ <103335846+computerscienceiscool@users.noreply.github.com>
|
||||||
Jack Mousseau <jmousseau@users.noreply.github.com>
|
Jack Mousseau <jmousseau@users.noreply.github.com>
|
||||||
JacobLinCool <jacoblincool@gmail.com>
|
JacobLinCool <jacoblincool@gmail.com>
|
||||||
Jakub Ráček <blizzcz@gmail.com>
|
Jakub Ráček <blizzcz@gmail.com>
|
||||||
Jared Van Bortel <jared@nomic.ai>
|
Jared Van Bortel <jared@nomic.ai>
|
||||||
Jay Binks <jaybinks@gmail.com>
|
Jay Binks <jaybinks@gmail.com>
|
||||||
|
Jayant <jayantyadav202@gmail.com>
|
||||||
|
Jeff Bolz <jbolz@nvidia.com>
|
||||||
|
Jeroen Mostert <jeroen.mostert@cm.com>
|
||||||
Jhen-Jie Hong <developer@jhen.me>
|
Jhen-Jie Hong <developer@jhen.me>
|
||||||
Jhen-Jie Hong <iainst0409@gmail.com>
|
Jhen-Jie Hong <iainst0409@gmail.com>
|
||||||
JidongZhang-THU <1119708529@qq.com>
|
JidongZhang-THU <1119708529@qq.com>
|
||||||
Jo Liss <joliss42@gmail.com>
|
Jo Liss <joliss42@gmail.com>
|
||||||
|
Joe Todd <joe.todd@codeplay.com>
|
||||||
Johan <jr.raffin@gmail.com>
|
Johan <jr.raffin@gmail.com>
|
||||||
Johannes Gäßler <johannesg@5d6.de>
|
Johannes Gäßler <johannesg@5d6.de>
|
||||||
John Balis <phobossystems@gmail.com>
|
John Balis <phobossystems@gmail.com>
|
||||||
|
JohnnyB <jboero@users.noreply.github.com>
|
||||||
Jonathan Soo <jcsoo@agora.com>
|
Jonathan Soo <jcsoo@agora.com>
|
||||||
Jonno <1160532+razodactyl@users.noreply.github.com>
|
Jonno <1160532+razodactyl@users.noreply.github.com>
|
||||||
Joonas Pihlajamaa <joonas.pihlajamaa@iki.fi>
|
Joonas Pihlajamaa <joonas.pihlajamaa@iki.fi>
|
||||||
Jose <34888496+Jerry-Master@users.noreply.github.com>
|
Jose <34888496+Jerry-Master@users.noreply.github.com>
|
||||||
Josh Bleecher Snyder <josharian@gmail.com>
|
Josh Bleecher Snyder <josharian@gmail.com>
|
||||||
|
Josscii <jossciiweiyi@gmail.com>
|
||||||
Judd <foldl@users.noreply.github.com>
|
Judd <foldl@users.noreply.github.com>
|
||||||
Jumper775 <78500318+jumpers775@users.noreply.github.com>
|
Jumper775 <78500318+jumpers775@users.noreply.github.com>
|
||||||
|
Jun Hee Yoo <contact.jhyoo@gmail.com>
|
||||||
|
Junil Kim <logyourself@gmail.com>
|
||||||
|
Justina Cho <justcho5@gmail.com>
|
||||||
Justine Tunney <jtunney@gmail.com>
|
Justine Tunney <jtunney@gmail.com>
|
||||||
|
Justine Tunney <jtunney@mozilla.com>
|
||||||
|
KITAITI Makoto <KitaitiMakoto@gmail.com>
|
||||||
KP Kaiser <kirk@zothcorp.com>
|
KP Kaiser <kirk@zothcorp.com>
|
||||||
Kamilake <exjang0@gmail.com>
|
Kamilake <exjang0@gmail.com>
|
||||||
|
Karol Kontny <82021046+kkontny@users.noreply.github.com>
|
||||||
|
Karthick <j.karthic2004@gmail.com>
|
||||||
Kartik Saranathan <278928+Kartiku@users.noreply.github.com>
|
Kartik Saranathan <278928+Kartiku@users.noreply.github.com>
|
||||||
Kasumi <90275229+kasumi-1@users.noreply.github.com>
|
Kasumi <90275229+kasumi-1@users.noreply.github.com>
|
||||||
Kawrakow <48489457+ikawrakow@users.noreply.github.com>
|
Kawrakow <48489457+ikawrakow@users.noreply.github.com>
|
||||||
|
Kendrick Taylor <kendrick@circuitsix.com>
|
||||||
Kevin Brothaler <admin@digipom.com>
|
Kevin Brothaler <admin@digipom.com>
|
||||||
|
Kevin Gibbons <bakkot@gmail.com>
|
||||||
|
Konosuke Sakai <konosuke@konosuke.work>
|
||||||
Konstantin Zhuravlyov <konstantin.zhuravlyov@amd.com>
|
Konstantin Zhuravlyov <konstantin.zhuravlyov@amd.com>
|
||||||
Kreijstal <rainb@tfwno.gf>
|
Kreijstal <rainb@tfwno.gf>
|
||||||
Kylin <56434533+KyL0N@users.noreply.github.com>
|
Kylin <56434533+KyL0N@users.noreply.github.com>
|
||||||
@ -147,56 +231,110 @@ Luis Herrera <herrera-luis@users.noreply.github.com>
|
|||||||
Lukas Rist <glaslos@gmail.com>
|
Lukas Rist <glaslos@gmail.com>
|
||||||
M. A. Ali <73258591+MightyStud@users.noreply.github.com>
|
M. A. Ali <73258591+MightyStud@users.noreply.github.com>
|
||||||
M. Eren Akbiyik <erenakbiyik@gmail.com>
|
M. Eren Akbiyik <erenakbiyik@gmail.com>
|
||||||
|
Ma Mingfei <mingfei.ma@intel.com>
|
||||||
Maciek <maciek.mab122@gmail.com>
|
Maciek <maciek.mab122@gmail.com>
|
||||||
|
Mahesh Madhav <67384846+heshpdx@users.noreply.github.com>
|
||||||
Marcin Mielniczuk <marmistrz.dev@zoho.eu>
|
Marcin Mielniczuk <marmistrz.dev@zoho.eu>
|
||||||
|
Mark Karpelès <MagicalTux@users.noreply.github.com>
|
||||||
|
Mark Zhuang <zhuangqiubin@gmail.com>
|
||||||
|
Markus Tavenrath <mtavenrath@users.noreply.github.com>
|
||||||
|
Martin Delille <martin@delille.org>
|
||||||
Martin Warnaar <martinwarnaar@gmail.com>
|
Martin Warnaar <martinwarnaar@gmail.com>
|
||||||
|
Masaya, Kato <62578291+msy-kato@users.noreply.github.com>
|
||||||
Matheus de Sousa <23645013+keyehzy@users.noreply.github.com>
|
Matheus de Sousa <23645013+keyehzy@users.noreply.github.com>
|
||||||
|
Mathieu Baudier <mbaudier@argeo.org>
|
||||||
Mathijs de Bruin <mathijs@mathijsfietst.nl>
|
Mathijs de Bruin <mathijs@mathijsfietst.nl>
|
||||||
Matija Pevec <mightymatth@users.noreply.github.com>
|
Matija Pevec <mightymatth@users.noreply.github.com>
|
||||||
|
Matt Stephenson <mstephenson6@users.noreply.github.com>
|
||||||
|
Max Krasnyansky <max.krasnyansky@gmail.com>
|
||||||
|
Max Krasnyansky <quic_maxk@quicinc.com>
|
||||||
Maximiliano Levi <8160966+maxilevi@users.noreply.github.com>
|
Maximiliano Levi <8160966+maxilevi@users.noreply.github.com>
|
||||||
Meng, Hengyu <hengyu.meng@intel.com>
|
Meng, Hengyu <hengyu.meng@intel.com>
|
||||||
|
Mengqing Cao <cmq0113@163.com>
|
||||||
Michael Podvitskiy <podvitskiymichael@gmail.com>
|
Michael Podvitskiy <podvitskiymichael@gmail.com>
|
||||||
Michael Rienstra <mrienstra@gmail.com>
|
Michael Rienstra <mrienstra@gmail.com>
|
||||||
Mikhail Grigorev <sleuthhound@gmail.com>
|
Mikhail Grigorev <sleuthhound@gmail.com>
|
||||||
Mohammadreza Hendiani <hendiani.mohammadreza@gmail.com>
|
Mohammadreza Hendiani <hendiani.mohammadreza@gmail.com>
|
||||||
Mohit Agarwal <mohit@sdf.org>
|
Mohit Agarwal <mohit@sdf.org>
|
||||||
|
Molly Sophia <mollysophia379@gmail.com>
|
||||||
Murilo Santana <mvrilo@gmail.com>
|
Murilo Santana <mvrilo@gmail.com>
|
||||||
|
NETZkultur GmbH <mulholland@netzkultur.de>
|
||||||
|
Natsu <chino@hotococoa.moe>
|
||||||
Neil Chudleigh <nchudleigh@users.noreply.github.com>
|
Neil Chudleigh <nchudleigh@users.noreply.github.com>
|
||||||
|
Neo Zhang <14088817+arthw@users.noreply.github.com>
|
||||||
Neo Zhang Jianyu <jianyu.zhang@intel.com>
|
Neo Zhang Jianyu <jianyu.zhang@intel.com>
|
||||||
Neuman Vong <neuman.vong@gmail.com>
|
Neuman Vong <neuman.vong@gmail.com>
|
||||||
|
Nicholai Tukanov <nicholaitukanov@gmail.com>
|
||||||
Nicholas Albion <nalbion@yahoo.com>
|
Nicholas Albion <nalbion@yahoo.com>
|
||||||
|
Nico Bosshard <nico@bosshome.ch>
|
||||||
|
Nicolò Scipione <nicolo.scipione@codeplay.com>
|
||||||
Niels Mayer <Niels.Mayer@gmail.com>
|
Niels Mayer <Niels.Mayer@gmail.com>
|
||||||
|
Nikita Sarychev <42014488+sARY77@users.noreply.github.com>
|
||||||
|
Nikolaj Olsson <nikse.dk@gmail.com>
|
||||||
Okabintaro <103938900+Okabintaro@users.noreply.github.com>
|
Okabintaro <103938900+Okabintaro@users.noreply.github.com>
|
||||||
Oleg Sidorov <me@whitebox.io>
|
Oleg Sidorov <me@whitebox.io>
|
||||||
Oleg Sidorov <oleg@sidorov.nl>
|
Oleg Sidorov <oleg@sidorov.nl>
|
||||||
|
Olivier Chafik <ochafik@users.noreply.github.com>
|
||||||
Ondrej Kokes <ondrej.kokes@gmail.com>
|
Ondrej Kokes <ondrej.kokes@gmail.com>
|
||||||
Ouadie EL FAROUKI <ouadie.elfarouki@codeplay.com>
|
Ouadie EL FAROUKI <ouadie.elfarouki@codeplay.com>
|
||||||
|
PAB <pierreantoine.bannier@gmail.com>
|
||||||
Paul Tsochantaris <ptsochantaris@icloud.com>
|
Paul Tsochantaris <ptsochantaris@icloud.com>
|
||||||
|
Pedro Probst <pprobst@insiberia.net>
|
||||||
|
Peng <hzp1024@qq.com>
|
||||||
|
Peter <peter277@users.noreply.github.com>
|
||||||
Philipp Zabel <philipp.zabel@gmail.com>
|
Philipp Zabel <philipp.zabel@gmail.com>
|
||||||
Philippe Normand <phil@base-art.net>
|
Philippe Normand <phil@base-art.net>
|
||||||
|
Philippe Normand <philn@igalia.com>
|
||||||
|
Plamen Minev <pacominev@gmail.com>
|
||||||
|
Prashant Vithule <119530321+Vithulep@users.noreply.github.com>
|
||||||
Przemysław Pawełczyk <przemoc@gmail.com>
|
Przemysław Pawełczyk <przemoc@gmail.com>
|
||||||
Qianhe Chen <54462604+chenqianhe@users.noreply.github.com>
|
Qianhe Chen <54462604+chenqianhe@users.noreply.github.com>
|
||||||
|
R0CKSTAR <xiaodong.ye@mthreads.com>
|
||||||
|
R0CKSTAR <yeahdongcn@gmail.com>
|
||||||
|
Radoslav Gerganov <rgerganov@gmail.com>
|
||||||
Radosław Gryta <radek.gryta@gmail.com>
|
Radosław Gryta <radek.gryta@gmail.com>
|
||||||
|
Rahul Vadhyar <107788610+RahulVadhyar@users.noreply.github.com>
|
||||||
|
Raiya Araki <83504221+rai62@users.noreply.github.com>
|
||||||
Reinforce-II <fate@eastal.com>
|
Reinforce-II <fate@eastal.com>
|
||||||
Reinis Muiznieks <muiznieks.reinis@gmail.com>
|
Reinis Muiznieks <muiznieks.reinis@gmail.com>
|
||||||
RelatedTitle <r3latedtitle@gmail.com>
|
RelatedTitle <r3latedtitle@gmail.com>
|
||||||
|
Rémy Oudompheng <oudomphe@phare.normalesup.org>
|
||||||
RhinoDevel <RhinoDevel@users.noreply.github.com>
|
RhinoDevel <RhinoDevel@users.noreply.github.com>
|
||||||
Rich Jones <miserlou@gmail.com>
|
Rich Jones <miserlou@gmail.com>
|
||||||
|
Robert Ormandi <52251610+ormandi@users.noreply.github.com>
|
||||||
Robin <robin.xw@hotmail.com>
|
Robin <robin.xw@hotmail.com>
|
||||||
Roddur Dasgupta <roddurd@gmail.com>
|
Roddur Dasgupta <roddurd@gmail.com>
|
||||||
Roland Rabien <figbug@gmail.com>
|
Roland Rabien <figbug@gmail.com>
|
||||||
|
Romain Biessy <romain.biessy@codeplay.com>
|
||||||
|
Ronsor <ronsor@ronsor.pw>
|
||||||
Rotem Dan <rotemdan@gmail.com>
|
Rotem Dan <rotemdan@gmail.com>
|
||||||
Ryan Hitchman <hitchmanr@gmail.com>
|
Ryan Hitchman <hitchmanr@gmail.com>
|
||||||
Ryan Metcalfe <107415876+RyanMetcalfeInt8@users.noreply.github.com>
|
Ryan Metcalfe <107415876+RyanMetcalfeInt8@users.noreply.github.com>
|
||||||
RyanChang <ftes90015@gmail.com>
|
RyanChang <ftes90015@gmail.com>
|
||||||
|
SRHMorris <69468379+SRHMorris@users.noreply.github.com>
|
||||||
|
SXX <sxx1136965276@gmail.com>
|
||||||
|
Sacha Arbonel <sacha.arbonel@hotmail.fr>
|
||||||
|
Salman Faroz <stsfaroz@gmail.com>
|
||||||
|
Salvatore Mesoraca <s.mesoraca16@gmail.com>
|
||||||
Sam <49637763+Onlyartist9@users.noreply.github.com>
|
Sam <49637763+Onlyartist9@users.noreply.github.com>
|
||||||
Sam Pullara <spullara@gmail.com>
|
Sam Pullara <spullara@gmail.com>
|
||||||
|
Samuel Durante <44513615+samueldurantes@users.noreply.github.com>
|
||||||
Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com>
|
Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com>
|
||||||
|
Sandro Hanea <40202887+sandrohanea@users.noreply.github.com>
|
||||||
|
Sergio López <slp@redhat.com>
|
||||||
Sergio López <slp@sinrega.org>
|
Sergio López <slp@sinrega.org>
|
||||||
|
Shanshan Shen <467638484@qq.com>
|
||||||
|
Shijie <821898965@qq.com>
|
||||||
|
Shupei Fan <dymarkfan@outlook.com>
|
||||||
Siddharth Ramakrishnan <srr2141@columbia.edu>
|
Siddharth Ramakrishnan <srr2141@columbia.edu>
|
||||||
|
Sigbjørn Skjæret <sigbjorn.skjaeret@scala.com>
|
||||||
Simon Moisselin <simon.moisstoll@gmail.com>
|
Simon Moisselin <simon.moisstoll@gmail.com>
|
||||||
Sindre Sorhus <sindresorhus@gmail.com>
|
Sindre Sorhus <sindresorhus@gmail.com>
|
||||||
Slava Primenko <primenko.s@gmail.com>
|
Slava Primenko <primenko.s@gmail.com>
|
||||||
|
Srihari-mcw <96763064+Srihari-mcw@users.noreply.github.com>
|
||||||
|
Stavros Panakakis <53979866+Stavrospanakakis@users.noreply.github.com>
|
||||||
|
Stefan Sydow <s.sydow@heinlein-video.de>
|
||||||
|
Stefan Sydow <stefan@sydow.email>
|
||||||
Syahmi Azhar <prsyahmi@gmail.com>
|
Syahmi Azhar <prsyahmi@gmail.com>
|
||||||
Syed Jafri <syedjafri97@gmail.com>
|
Syed Jafri <syedjafri97@gmail.com>
|
||||||
Sơn Phan Trung <phantrungson17@gmail.com>
|
Sơn Phan Trung <phantrungson17@gmail.com>
|
||||||
@ -205,37 +343,63 @@ Takeshi Inoue <inoue.takeshi@gmail.com>
|
|||||||
Tamotsu Takahashi <ttakah+github@gmail.com>
|
Tamotsu Takahashi <ttakah+github@gmail.com>
|
||||||
Taras Glek <taras@thegp.com>
|
Taras Glek <taras@thegp.com>
|
||||||
Tauseef Mohiuddin <35351464+tauseefmohammed2@users.noreply.github.com>
|
Tauseef Mohiuddin <35351464+tauseefmohammed2@users.noreply.github.com>
|
||||||
|
Thamster <Thamster@users.noreply.github.com>
|
||||||
Thijs Raymakers <thijs@raymakers.nl>
|
Thijs Raymakers <thijs@raymakers.nl>
|
||||||
Thomas Fitzsimmons <fitzsim@fitzsim.org>
|
Thomas Fitzsimmons <fitzsim@fitzsim.org>
|
||||||
Tiago Fassoni <tiagofassoni@users.noreply.github.com>
|
Tiago Fassoni <tiagofassoni@users.noreply.github.com>
|
||||||
Tienshiao Ma <tienshiao@tienshiao.org>
|
Tienshiao Ma <tienshiao@tienshiao.org>
|
||||||
|
Tim Miller <drasticactions@users.noreply.github.com>
|
||||||
Timothy Cronin <40186632+4imothy@users.noreply.github.com>
|
Timothy Cronin <40186632+4imothy@users.noreply.github.com>
|
||||||
Tobrun <tobrun.van.nuland@gmail.com>
|
Tobrun <tobrun.van.nuland@gmail.com>
|
||||||
Todd <taf2@users.noreply.github.com>
|
Todd <taf2@users.noreply.github.com>
|
||||||
|
Toliver <teejae@gmail.com>
|
||||||
Tong Li <31761981+litongjava@users.noreply.github.com>
|
Tong Li <31761981+litongjava@users.noreply.github.com>
|
||||||
|
Tony Wasserka <4840017+neobrain@users.noreply.github.com>
|
||||||
Topping1 <78745143+Topping1@users.noreply.github.com>
|
Topping1 <78745143+Topping1@users.noreply.github.com>
|
||||||
Travis Cline <travis.cline@gmail.com>
|
Travis Cline <travis.cline@gmail.com>
|
||||||
UEXTM.com <84163508+uextm@users.noreply.github.com>
|
UEXTM.com <84163508+uextm@users.noreply.github.com>
|
||||||
|
UsernamesLame <156965854+UsernamesLame@users.noreply.github.com>
|
||||||
Vadim Peretokin <vperetokin@hey.com>
|
Vadim Peretokin <vperetokin@hey.com>
|
||||||
Valentin Gosu <1454649+valenting@users.noreply.github.com>
|
Valentin Gosu <1454649+valenting@users.noreply.github.com>
|
||||||
|
Vin Misra <vinith@alum.mit.edu>
|
||||||
Vulcan <93451215+trholding@users.noreply.github.com>
|
Vulcan <93451215+trholding@users.noreply.github.com>
|
||||||
WhiteOlivierus <36532695+WhiteOlivierus@users.noreply.github.com>
|
WhiteOlivierus <36532695+WhiteOlivierus@users.noreply.github.com>
|
||||||
|
William Tambellini <william.tambellini@gmail.com>
|
||||||
|
William Tambellini <wtambellini@sdl.com>
|
||||||
|
Wilson Silva <wilson.dsigns@gmail.com>
|
||||||
Xiang (Kevin) Li <kevinli020508@gmail.com>
|
Xiang (Kevin) Li <kevinli020508@gmail.com>
|
||||||
Xiao-Yong Jin <jinxiaoyong@gmail.com>
|
Xiao-Yong Jin <jinxiaoyong@gmail.com>
|
||||||
XiaotaoChen <chenxiaotao1234@gmail.com>
|
XiaotaoChen <chenxiaotao1234@gmail.com>
|
||||||
|
Xingchen Song(宋星辰) <xingchensong1996@163.com>
|
||||||
|
Xinpeng Dou <81913537+Dou-Git@users.noreply.github.com>
|
||||||
|
Xuan Son Nguyen <thichthat@gmail.com>
|
||||||
Yajing Tang <phillis@google.com>
|
Yajing Tang <phillis@google.com>
|
||||||
Yang Shen <aplshenyang@gmail.com>
|
Yang Shen <aplshenyang@gmail.com>
|
||||||
Yunès <jean.baptiste.yunes@free.fr>
|
Yunès <jean.baptiste.yunes@free.fr>
|
||||||
|
Yuri Khrustalev <ykhrustalev@users.noreply.github.com>
|
||||||
|
Yusuf Redžić <48274562+redzic@users.noreply.github.com>
|
||||||
ZaBlazzingZephyrus <119159668+blazingzephyr@users.noreply.github.com>
|
ZaBlazzingZephyrus <119159668+blazingzephyr@users.noreply.github.com>
|
||||||
|
Zhenwei Jin <109658203+kylo5aby@users.noreply.github.com>
|
||||||
|
Zhiyuan Li <lizhiyuan@uniartisan.com>
|
||||||
|
Zhiyuan Li <uniartisan2017@gmail.com>
|
||||||
Zigfrid Zvezdin <ziggerZZ@gmail.com>
|
Zigfrid Zvezdin <ziggerZZ@gmail.com>
|
||||||
Zollner <24618122+Zolliner@users.noreply.github.com>
|
Zollner <24618122+Zolliner@users.noreply.github.com>
|
||||||
|
a3sh <38979186+A3shTnT@users.noreply.github.com>
|
||||||
|
ag2s20150909 <19373730+ag2s20150909@users.noreply.github.com>
|
||||||
|
agray3 <agray3@users.noreply.github.com>
|
||||||
ai-at-home <149282006+ai-at-home@users.noreply.github.com>
|
ai-at-home <149282006+ai-at-home@users.noreply.github.com>
|
||||||
|
aldorof <aldorof@users.noreply.github.com>
|
||||||
alonfaraj <alonfaraj@gmail.com>
|
alonfaraj <alonfaraj@gmail.com>
|
||||||
|
amd-dwang <dong.wang@amd.com>
|
||||||
|
amritahs-ibm <amritahs@linux.vnet.ibm.com>
|
||||||
andypayne <apayne@gmail.com>
|
andypayne <apayne@gmail.com>
|
||||||
ardfork <134447697+ardfork@users.noreply.github.com>
|
ardfork <134447697+ardfork@users.noreply.github.com>
|
||||||
|
arizhih <40765267+arizhih@users.noreply.github.com>
|
||||||
automaticcat <daogiatuank54@gmail.com>
|
automaticcat <daogiatuank54@gmail.com>
|
||||||
|
bandoti <141645996+bandoti@users.noreply.github.com>
|
||||||
be-next <jerome.ramette@gmail.com>
|
be-next <jerome.ramette@gmail.com>
|
||||||
bert hubert <bert@hubertnet.nl>
|
bert hubert <bert@hubertnet.nl>
|
||||||
|
billyct <billy_allen@126.com>
|
||||||
bmwl <brian.marshall@tolko.com>
|
bmwl <brian.marshall@tolko.com>
|
||||||
bobqianic <129547291+bobqianic@users.noreply.github.com>
|
bobqianic <129547291+bobqianic@users.noreply.github.com>
|
||||||
bocytko <bocytko+github@gmail.com>
|
bocytko <bocytko+github@gmail.com>
|
||||||
@ -248,7 +412,9 @@ byte-6174 <88070277+byte-6174@users.noreply.github.com>
|
|||||||
cdosoftei <ciprian.dosoftei@gmail.com>
|
cdosoftei <ciprian.dosoftei@gmail.com>
|
||||||
clach04 <Chris.Clark@actian.com>
|
clach04 <Chris.Clark@actian.com>
|
||||||
compilade <113953597+compilade@users.noreply.github.com>
|
compilade <113953597+compilade@users.noreply.github.com>
|
||||||
|
compilade <git@compilade.net>
|
||||||
conradg <conradjgodfrey@gmail.com>
|
conradg <conradjgodfrey@gmail.com>
|
||||||
|
crummyh <elijah@crums.us>
|
||||||
ddpasa <112642920+ddpasa@users.noreply.github.com>
|
ddpasa <112642920+ddpasa@users.noreply.github.com>
|
||||||
denersc <denerstassun@gmail.com>
|
denersc <denerstassun@gmail.com>
|
||||||
dscripka <dscripka@users.noreply.github.com>
|
dscripka <dscripka@users.noreply.github.com>
|
||||||
@ -256,28 +422,55 @@ duthils <duthils@duthils.net>
|
|||||||
ecneladis <ecneladis@users.noreply.github.com>
|
ecneladis <ecneladis@users.noreply.github.com>
|
||||||
faker <nspyia2002@gmail.com>
|
faker <nspyia2002@gmail.com>
|
||||||
fitzsim <fitzsim@fitzsim.org>
|
fitzsim <fitzsim@fitzsim.org>
|
||||||
|
fj-y-saito <85871716+fj-y-saito@users.noreply.github.com>
|
||||||
fraxy-v <65565042+fraxy-v@users.noreply.github.com>
|
fraxy-v <65565042+fraxy-v@users.noreply.github.com>
|
||||||
genevera (she/her) <genevera@users.noreply.github.com>
|
genevera (she/her) <genevera@users.noreply.github.com>
|
||||||
geniusnut <geniusnut@gmail.com>
|
geniusnut <geniusnut@gmail.com>
|
||||||
|
gilbertgong <gilbert.gong@gmail.com>
|
||||||
|
gn64 <yukikaze.jp@gmail.com>
|
||||||
|
goldwaving <77494627+goldwaving@users.noreply.github.com>
|
||||||
greeshmay <greeshmay@gmail.com>
|
greeshmay <greeshmay@gmail.com>
|
||||||
|
haopeng <657407891@qq.com>
|
||||||
|
hipudding <huafengchun@gmail.com>
|
||||||
|
hsinhoyeh <yhh92u@gmail.com>
|
||||||
hydai <z54981220@gmail.com>
|
hydai <z54981220@gmail.com>
|
||||||
iamthad <thadeus.j.fleming@gmail.com>
|
iamthad <thadeus.j.fleming@gmail.com>
|
||||||
|
issixx <46835150+issixx@users.noreply.github.com>
|
||||||
james wolf <contractorwolf@hotmail.com>
|
james wolf <contractorwolf@hotmail.com>
|
||||||
|
jdomke <28772296+jdomke@users.noreply.github.com>
|
||||||
|
jettoblack <jettoblack@gmail.com>
|
||||||
|
jiez <373447296@qq.com>
|
||||||
joecryptotoo <80373433+joecryptotoo@users.noreply.github.com>
|
joecryptotoo <80373433+joecryptotoo@users.noreply.github.com>
|
||||||
jorismertz <35079666+jorismertz@users.noreply.github.com>
|
jorismertz <35079666+jorismertz@users.noreply.github.com>
|
||||||
|
junchao-loongson <68935141+junchao-loongson@users.noreply.github.com>
|
||||||
junkfood <69683722+JunkFood02@users.noreply.github.com>
|
junkfood <69683722+JunkFood02@users.noreply.github.com>
|
||||||
jwijffels <jwijffels@bnosac.be>
|
jwijffels <jwijffels@bnosac.be>
|
||||||
|
k.h.lai <adrian.k.h.lai@outlook.com>
|
||||||
kamranjon <kamranjon@gmail.com>
|
kamranjon <kamranjon@gmail.com>
|
||||||
katsu560 <katsu560oo-@docomo.ne.jp>
|
katsu560 <katsu560oo-@docomo.ne.jp>
|
||||||
kennethge <57784063+kenneth-ge@users.noreply.github.com>
|
kennethge <57784063+kenneth-ge@users.noreply.github.com>
|
||||||
keyehzy <msamuel@aluno.puc-rio.br>
|
keyehzy <msamuel@aluno.puc-rio.br>
|
||||||
|
kunnis <kunnis@users.noreply.github.com>
|
||||||
|
l3utterfly <gc.pthzfoldr@gmail.com>
|
||||||
leejet <leejet714@gmail.com>
|
leejet <leejet714@gmail.com>
|
||||||
|
leo-pony <nengjunma@outlook.com>
|
||||||
|
lhez <quic_lih@quicinc.com>
|
||||||
litong <31761981+litongjava@users.noreply.github.com>
|
litong <31761981+litongjava@users.noreply.github.com>
|
||||||
|
liuwei-git <14815172+liuwei-git@users.noreply.github.com>
|
||||||
lnyan <lkwq007@gmail.com>
|
lnyan <lkwq007@gmail.com>
|
||||||
|
luoyu-intel <yu.luo@intel.com>
|
||||||
m.bell <m.bell@techsmith.com>
|
m.bell <m.bell@techsmith.com>
|
||||||
|
mahorozte <41834471+mahorozte@users.noreply.github.com>
|
||||||
|
mashizora <30516315+mashizora@users.noreply.github.com>
|
||||||
|
matt23654 <matthew.webber@protonmail.com>
|
||||||
|
matteo <matteogeniaccio@yahoo.it>
|
||||||
|
mgrachten <maarten@grachten.eu>
|
||||||
mkiol <mkiol@users.noreply.github.com>
|
mkiol <mkiol@users.noreply.github.com>
|
||||||
|
mky_coder <47767389+mkycoder@users.noreply.github.com>
|
||||||
novag <7754358+novag@users.noreply.github.com>
|
novag <7754358+novag@users.noreply.github.com>
|
||||||
pajowu <pajowu@pajowu.de>
|
pajowu <pajowu@pajowu.de>
|
||||||
|
pengxin99 <pengxin.yuan@intel.com>
|
||||||
|
petterreinholdtsen <pere-github@hungry.com>
|
||||||
polarmoon <90010972+polarmoon@users.noreply.github.com>
|
polarmoon <90010972+polarmoon@users.noreply.github.com>
|
||||||
rlapray <lapray.romain@gmail.com>
|
rlapray <lapray.romain@gmail.com>
|
||||||
sandrohanea <40202887+sandrohanea@users.noreply.github.com>
|
sandrohanea <40202887+sandrohanea@users.noreply.github.com>
|
||||||
@ -287,15 +480,31 @@ shikokuchuo <53399081+shikokuchuo@users.noreply.github.com>
|
|||||||
slaren <slarengh@gmail.com>
|
slaren <slarengh@gmail.com>
|
||||||
slashlib <slashlib@users.noreply.github.com>
|
slashlib <slashlib@users.noreply.github.com>
|
||||||
snadampal <87143774+snadampal@users.noreply.github.com>
|
snadampal <87143774+snadampal@users.noreply.github.com>
|
||||||
|
someone13574 <81528246+someone13574@users.noreply.github.com>
|
||||||
st-gr <38470677+st-gr@users.noreply.github.com>
|
st-gr <38470677+st-gr@users.noreply.github.com>
|
||||||
|
stduhpf <stephduh@live.fr>
|
||||||
|
stormofice <58337328+stormofice@users.noreply.github.com>
|
||||||
texmex76 <40733439+texmex76@users.noreply.github.com>
|
texmex76 <40733439+texmex76@users.noreply.github.com>
|
||||||
thefinaldegree <thefinaldegree@gmail.com>
|
thefinaldegree <thefinaldegree@gmail.com>
|
||||||
|
thewh1teagle <61390950+thewh1teagle@users.noreply.github.com>
|
||||||
|
toboil-features <160222185+toboil-features@users.noreply.github.com>
|
||||||
trixirt <trix@redhat.com>
|
trixirt <trix@redhat.com>
|
||||||
ulatekh <ulatekh@yahoo.com>
|
ulatekh <ulatekh@yahoo.com>
|
||||||
undef <undefdev@gmail.com>
|
undef <undefdev@gmail.com>
|
||||||
|
uvos <devnull@uvos.xyz>
|
||||||
|
uvos <philipp@uvos.xyz>
|
||||||
|
valVk <valVk@users.noreply.github.com>
|
||||||
venkr <venkateshrameshkumar+1@gmail.com>
|
venkr <venkateshrameshkumar+1@gmail.com>
|
||||||
vicalloy <zbirder@gmail.com>
|
vicalloy <zbirder@gmail.com>
|
||||||
|
wangshuai09 <391746016@qq.com>
|
||||||
|
woachk <24752637+woachk@users.noreply.github.com>
|
||||||
|
xctan <axunlei@gmail.com>
|
||||||
xdrudis <xavierdrudis@yahoo.es>
|
xdrudis <xavierdrudis@yahoo.es>
|
||||||
|
yuri@FreeBSD <yuri@FreeBSD>
|
||||||
|
zhangjixiong <code.zjx@gmail.com>
|
||||||
|
zhentaoyu <zhentao.yu@intel.com>
|
||||||
zhouwg <6889919+zhouwg@users.noreply.github.com>
|
zhouwg <6889919+zhouwg@users.noreply.github.com>
|
||||||
|
zhouwg <zhouwg2000@gmail.com>
|
||||||
|
谢乃闻 <sienaiwun@users.noreply.github.com>
|
||||||
布客飞龙 <562826179@qq.com>
|
布客飞龙 <562826179@qq.com>
|
||||||
Артём Земляк <azemlyak@smart-consulting.ru>
|
Артём Земляк <azemlyak@smart-consulting.ru>
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
cmake_minimum_required(VERSION 3.5) # for add_link_options and implicit target directories.
|
cmake_minimum_required(VERSION 3.5) # for add_link_options and implicit target directories.
|
||||||
project("whisper.cpp" C CXX)
|
project("whisper.cpp" C CXX)
|
||||||
project("whisper.cpp" VERSION 1.7.3)
|
project("whisper.cpp" VERSION 1.7.4)
|
||||||
include(CheckIncludeFileCXX)
|
include(CheckIncludeFileCXX)
|
||||||
|
|
||||||
set(SOVERSION 1)
|
set(SOVERSION 1)
|
||||||
|
15
Makefile
15
Makefile
@ -18,17 +18,6 @@ samples:
|
|||||||
@wget --quiet --show-progress -O samples/mm1.wav https://cdn.openai.com/whisper/draft-20220913a/micro-machines.wav
|
@wget --quiet --show-progress -O samples/mm1.wav https://cdn.openai.com/whisper/draft-20220913a/micro-machines.wav
|
||||||
@wget --quiet --show-progress -O samples/a13.mp3 https://upload.wikimedia.org/wikipedia/commons/transcoded/6/6f/Apollo13-wehaveaproblem.ogg/Apollo13-wehaveaproblem.ogg.mp3
|
@wget --quiet --show-progress -O samples/a13.mp3 https://upload.wikimedia.org/wikipedia/commons/transcoded/6/6f/Apollo13-wehaveaproblem.ogg/Apollo13-wehaveaproblem.ogg.mp3
|
||||||
@wget --quiet --show-progress -O samples/diffusion2023-07-03.flac https://archive.org/download/diffusion2023-07-03/diffusion2023-07-03.flac
|
@wget --quiet --show-progress -O samples/diffusion2023-07-03.flac https://archive.org/download/diffusion2023-07-03/diffusion2023-07-03.flac
|
||||||
@echo "Converting to 16-bit WAV ..."
|
|
||||||
@ffmpeg -loglevel -0 -y -i samples/gb0.ogg -ar 16000 -ac 1 -c:a pcm_s16le samples/gb0.wav
|
|
||||||
@ffmpeg -loglevel -0 -y -i samples/gb1.ogg -ar 16000 -ac 1 -c:a pcm_s16le samples/gb1.wav
|
|
||||||
@ffmpeg -loglevel -0 -y -i samples/hp0.ogg -ar 16000 -ac 1 -c:a pcm_s16le samples/hp0.wav
|
|
||||||
@rm samples/*.ogg
|
|
||||||
@ffmpeg -loglevel -0 -y -i samples/mm1.wav -ar 16000 -ac 1 -c:a pcm_s16le samples/mm0.wav
|
|
||||||
@rm samples/mm1.wav
|
|
||||||
@ffmpeg -loglevel -0 -y -i samples/a13.mp3 -ar 16000 -ac 1 -c:a pcm_s16le -ss 00:00:00 -to 00:00:30 samples/a13.wav
|
|
||||||
@rm samples/a13.mp3
|
|
||||||
@ffmpeg -loglevel -0 -y -i samples/diffusion2023-07-03.flac -ar 16000 -ac 1 -c:a pcm_s16le samples/diffusion2023-07-03.wav
|
|
||||||
@rm samples/diffusion2023-07-03.flac
|
|
||||||
|
|
||||||
#
|
#
|
||||||
# Models
|
# Models
|
||||||
@ -59,11 +48,11 @@ tiny.en tiny base.en base small.en small medium.en medium large-v1 large-v2 larg
|
|||||||
@echo "Running $@ on all samples in ./samples ..."
|
@echo "Running $@ on all samples in ./samples ..."
|
||||||
@echo "==============================================="
|
@echo "==============================================="
|
||||||
@echo ""
|
@echo ""
|
||||||
@for f in samples/*.wav; do \
|
@for f in samples/*$(.flac .mp3 .ogg .wav); do \
|
||||||
echo "----------------------------------------------" ; \
|
echo "----------------------------------------------" ; \
|
||||||
echo "[+] Running $@ on $$f ... (run 'ffplay $$f' to listen)" ; \
|
echo "[+] Running $@ on $$f ... (run 'ffplay $$f' to listen)" ; \
|
||||||
echo "----------------------------------------------" ; \
|
echo "----------------------------------------------" ; \
|
||||||
echo "" ; \
|
echo "" ; \
|
||||||
./build/bin/main -m models/ggml-$@.bin -f $$f ; \
|
./build/bin/whisper-cli -m models/ggml-$@.bin -f $$f ; \
|
||||||
echo "" ; \
|
echo "" ; \
|
||||||
done
|
done
|
||||||
|
@ -1,19 +0,0 @@
|
|||||||
// swift-tools-version:5.5
|
|
||||||
|
|
||||||
import PackageDescription
|
|
||||||
|
|
||||||
let package = Package(
|
|
||||||
name: "whisper",
|
|
||||||
platforms: [
|
|
||||||
.macOS(.v12),
|
|
||||||
.iOS(.v14),
|
|
||||||
.watchOS(.v4),
|
|
||||||
.tvOS(.v14)
|
|
||||||
],
|
|
||||||
products: [
|
|
||||||
.library(name: "whisper", targets: ["whisper"]),
|
|
||||||
],
|
|
||||||
targets: [
|
|
||||||
.systemLibrary(name: "whisper", pkgConfig: "whisper"),
|
|
||||||
]
|
|
||||||
)
|
|
284
README.md
284
README.md
@ -7,14 +7,17 @@
|
|||||||
[](https://conan.io/center/whisper-cpp)
|
[](https://conan.io/center/whisper-cpp)
|
||||||
[](https://www.npmjs.com/package/whisper.cpp/)
|
[](https://www.npmjs.com/package/whisper.cpp/)
|
||||||
|
|
||||||
Stable: [v1.7.3](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.7.3) / [Roadmap | F.A.Q.](https://github.com/ggerganov/whisper.cpp/discussions/126)
|
> [!NOTE]
|
||||||
|
> New maintenance roadmap: https://github.com/ggerganov/whisper.cpp/discussions/2788
|
||||||
|
|
||||||
|
Stable: [v1.7.4](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.7.4) / [Roadmap | F.A.Q.](https://github.com/ggerganov/whisper.cpp/discussions/126)
|
||||||
|
|
||||||
High-performance inference of [OpenAI's Whisper](https://github.com/openai/whisper) automatic speech recognition (ASR) model:
|
High-performance inference of [OpenAI's Whisper](https://github.com/openai/whisper) automatic speech recognition (ASR) model:
|
||||||
|
|
||||||
- Plain C/C++ implementation without dependencies
|
- Plain C/C++ implementation without dependencies
|
||||||
- Apple Silicon first-class citizen - optimized via ARM NEON, Accelerate framework, Metal and [Core ML](#core-ml-support)
|
- Apple Silicon first-class citizen - optimized via ARM NEON, Accelerate framework, Metal and [Core ML](#core-ml-support)
|
||||||
- AVX intrinsics support for x86 architectures
|
- AVX intrinsics support for x86 architectures
|
||||||
- VSX intrinsics support for POWER architectures
|
- [VSX intrinsics support for POWER architectures](#power-vsx-intrinsics)
|
||||||
- Mixed F16 / F32 precision
|
- Mixed F16 / F32 precision
|
||||||
- [Integer quantization support](#quantization)
|
- [Integer quantization support](#quantization)
|
||||||
- Zero memory allocations at runtime
|
- Zero memory allocations at runtime
|
||||||
@ -53,18 +56,6 @@ On Apple Silicon, the inference runs fully on the GPU via Metal:
|
|||||||
|
|
||||||
https://github.com/ggerganov/whisper.cpp/assets/1991296/c82e8f86-60dc-49f2-b048-d2fdbd6b5225
|
https://github.com/ggerganov/whisper.cpp/assets/1991296/c82e8f86-60dc-49f2-b048-d2fdbd6b5225
|
||||||
|
|
||||||
Or you can even run it straight in the browser: [talk.wasm](examples/talk.wasm)
|
|
||||||
|
|
||||||
## Implementation details
|
|
||||||
|
|
||||||
- The core tensor operations are implemented in C ([ggml.h](ggml/include/ggml.h) / [ggml.c](ggml/src/ggml.c))
|
|
||||||
- The transformer model and the high-level C-style API are implemented in C++ ([whisper.h](include/whisper.h) / [whisper.cpp](src/whisper.cpp))
|
|
||||||
- Sample usage is demonstrated in [main.cpp](examples/main)
|
|
||||||
- Sample real-time audio transcription from the microphone is demonstrated in [stream.cpp](examples/stream)
|
|
||||||
- Various other examples are available in the [examples](examples) folder
|
|
||||||
|
|
||||||
The tensor operators are optimized heavily for Apple silicon CPUs. Depending on the computation size, Arm Neon SIMD intrinsics or CBLAS Accelerate framework routines are used. The latter are especially effective for bigger sizes since the Accelerate framework utilizes the special-purpose AMX coprocessor available in modern Apple products.
|
|
||||||
|
|
||||||
## Quick start
|
## Quick start
|
||||||
|
|
||||||
First clone the repository:
|
First clone the repository:
|
||||||
@ -85,135 +76,26 @@ Then, download one of the Whisper [models](models/README.md) converted in [`ggml
|
|||||||
sh ./models/download-ggml-model.sh base.en
|
sh ./models/download-ggml-model.sh base.en
|
||||||
```
|
```
|
||||||
|
|
||||||
Now build the [main](examples/main) example and transcribe an audio file like this:
|
Now build the [whisper-cli](examples/cli) example and transcribe an audio file like this:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# build the main example
|
# build the project
|
||||||
cmake -B build
|
cmake -B build
|
||||||
cmake --build build --config Release
|
cmake --build build --config Release
|
||||||
|
|
||||||
# transcribe an audio file
|
# transcribe an audio file
|
||||||
./build/bin/main -f samples/jfk.wav
|
./build/bin/whisper-cli -f samples/jfk.wav
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
For a quick demo, simply run `make base.en`:
|
For a quick demo, simply run `make base.en`.
|
||||||
|
|
||||||
```text
|
|
||||||
$ make -j base.en
|
|
||||||
|
|
||||||
cc -I. -O3 -std=c11 -pthread -DGGML_USE_ACCELERATE -c ggml.c -o ggml.o
|
|
||||||
c++ -I. -I./examples -O3 -std=c++11 -pthread -c whisper.cpp -o whisper.o
|
|
||||||
c++ -I. -I./examples -O3 -std=c++11 -pthread examples/main/main.cpp whisper.o ggml.o -o main -framework Accelerate
|
|
||||||
./main -h
|
|
||||||
|
|
||||||
usage: ./main [options] file0.wav file1.wav ...
|
|
||||||
|
|
||||||
options:
|
|
||||||
-h, --help [default] show this help message and exit
|
|
||||||
-t N, --threads N [4 ] number of threads to use during computation
|
|
||||||
-p N, --processors N [1 ] number of processors to use during computation
|
|
||||||
-ot N, --offset-t N [0 ] time offset in milliseconds
|
|
||||||
-on N, --offset-n N [0 ] segment index offset
|
|
||||||
-d N, --duration N [0 ] duration of audio to process in milliseconds
|
|
||||||
-mc N, --max-context N [-1 ] maximum number of text context tokens to store
|
|
||||||
-ml N, --max-len N [0 ] maximum segment length in characters
|
|
||||||
-sow, --split-on-word [false ] split on word rather than on token
|
|
||||||
-bo N, --best-of N [5 ] number of best candidates to keep
|
|
||||||
-bs N, --beam-size N [5 ] beam size for beam search
|
|
||||||
-wt N, --word-thold N [0.01 ] word timestamp probability threshold
|
|
||||||
-et N, --entropy-thold N [2.40 ] entropy threshold for decoder fail
|
|
||||||
-lpt N, --logprob-thold N [-1.00 ] log probability threshold for decoder fail
|
|
||||||
-debug, --debug-mode [false ] enable debug mode (eg. dump log_mel)
|
|
||||||
-tr, --translate [false ] translate from source language to english
|
|
||||||
-di, --diarize [false ] stereo audio diarization
|
|
||||||
-tdrz, --tinydiarize [false ] enable tinydiarize (requires a tdrz model)
|
|
||||||
-nf, --no-fallback [false ] do not use temperature fallback while decoding
|
|
||||||
-otxt, --output-txt [false ] output result in a text file
|
|
||||||
-ovtt, --output-vtt [false ] output result in a vtt file
|
|
||||||
-osrt, --output-srt [false ] output result in a srt file
|
|
||||||
-olrc, --output-lrc [false ] output result in a lrc file
|
|
||||||
-owts, --output-words [false ] output script for generating karaoke video
|
|
||||||
-fp, --font-path [/System/Library/Fonts/Supplemental/Courier New Bold.ttf] path to a monospace font for karaoke video
|
|
||||||
-ocsv, --output-csv [false ] output result in a CSV file
|
|
||||||
-oj, --output-json [false ] output result in a JSON file
|
|
||||||
-ojf, --output-json-full [false ] include more information in the JSON file
|
|
||||||
-of FNAME, --output-file FNAME [ ] output file path (without file extension)
|
|
||||||
-ps, --print-special [false ] print special tokens
|
|
||||||
-pc, --print-colors [false ] print colors
|
|
||||||
-pp, --print-progress [false ] print progress
|
|
||||||
-nt, --no-timestamps [false ] do not print timestamps
|
|
||||||
-l LANG, --language LANG [en ] spoken language ('auto' for auto-detect)
|
|
||||||
-dl, --detect-language [false ] exit after automatically detecting language
|
|
||||||
--prompt PROMPT [ ] initial prompt
|
|
||||||
-m FNAME, --model FNAME [models/ggml-base.en.bin] model path
|
|
||||||
-f FNAME, --file FNAME [ ] input WAV file path
|
|
||||||
-oved D, --ov-e-device DNAME [CPU ] the OpenVINO device used for encode inference
|
|
||||||
-ls, --log-score [false ] log best decoder scores of tokens
|
|
||||||
-ng, --no-gpu [false ] disable GPU
|
|
||||||
|
|
||||||
|
|
||||||
sh ./models/download-ggml-model.sh base.en
|
|
||||||
Downloading ggml model base.en ...
|
|
||||||
ggml-base.en.bin 100%[========================>] 141.11M 6.34MB/s in 24s
|
|
||||||
Done! Model 'base.en' saved in 'models/ggml-base.en.bin'
|
|
||||||
You can now use it like this:
|
|
||||||
|
|
||||||
$ ./main -m models/ggml-base.en.bin -f samples/jfk.wav
|
|
||||||
|
|
||||||
|
|
||||||
===============================================
|
|
||||||
Running base.en on all samples in ./samples ...
|
|
||||||
===============================================
|
|
||||||
|
|
||||||
----------------------------------------------
|
|
||||||
[+] Running base.en on samples/jfk.wav ... (run 'ffplay samples/jfk.wav' to listen)
|
|
||||||
----------------------------------------------
|
|
||||||
|
|
||||||
whisper_init_from_file: loading model from 'models/ggml-base.en.bin'
|
|
||||||
whisper_model_load: loading model
|
|
||||||
whisper_model_load: n_vocab = 51864
|
|
||||||
whisper_model_load: n_audio_ctx = 1500
|
|
||||||
whisper_model_load: n_audio_state = 512
|
|
||||||
whisper_model_load: n_audio_head = 8
|
|
||||||
whisper_model_load: n_audio_layer = 6
|
|
||||||
whisper_model_load: n_text_ctx = 448
|
|
||||||
whisper_model_load: n_text_state = 512
|
|
||||||
whisper_model_load: n_text_head = 8
|
|
||||||
whisper_model_load: n_text_layer = 6
|
|
||||||
whisper_model_load: n_mels = 80
|
|
||||||
whisper_model_load: f16 = 1
|
|
||||||
whisper_model_load: type = 2
|
|
||||||
whisper_model_load: mem required = 215.00 MB (+ 6.00 MB per decoder)
|
|
||||||
whisper_model_load: kv self size = 5.25 MB
|
|
||||||
whisper_model_load: kv cross size = 17.58 MB
|
|
||||||
whisper_model_load: adding 1607 extra tokens
|
|
||||||
whisper_model_load: model ctx = 140.60 MB
|
|
||||||
whisper_model_load: model size = 140.54 MB
|
|
||||||
|
|
||||||
system_info: n_threads = 4 / 10 | AVX = 0 | AVX2 = 0 | AVX512 = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 |
|
|
||||||
|
|
||||||
main: processing 'samples/jfk.wav' (176000 samples, 11.0 sec), 4 threads, 1 processors, lang = en, task = transcribe, timestamps = 1 ...
|
|
||||||
|
|
||||||
|
|
||||||
[00:00:00.000 --> 00:00:11.000] And so my fellow Americans, ask not what your country can do for you, ask what you can do for your country.
|
|
||||||
|
|
||||||
|
|
||||||
whisper_print_timings: fallbacks = 0 p / 0 h
|
|
||||||
whisper_print_timings: load time = 113.81 ms
|
|
||||||
whisper_print_timings: mel time = 15.40 ms
|
|
||||||
whisper_print_timings: sample time = 11.58 ms / 27 runs ( 0.43 ms per run)
|
|
||||||
whisper_print_timings: encode time = 266.60 ms / 1 runs ( 266.60 ms per run)
|
|
||||||
whisper_print_timings: decode time = 66.11 ms / 27 runs ( 2.45 ms per run)
|
|
||||||
whisper_print_timings: total time = 476.31 ms
|
|
||||||
```
|
|
||||||
|
|
||||||
The command downloads the `base.en` model converted to custom `ggml` format and runs the inference on all `.wav` samples in the folder `samples`.
|
The command downloads the `base.en` model converted to custom `ggml` format and runs the inference on all `.wav` samples in the folder `samples`.
|
||||||
|
|
||||||
For detailed usage instructions, run: `./main -h`
|
For detailed usage instructions, run: `./build/bin/whisper-cli -h`
|
||||||
|
|
||||||
Note that the [main](examples/main) example currently runs only with 16-bit WAV files, so make sure to convert your input before running the tool.
|
Note that the [whisper-cli](examples/cli) example currently runs only with 16-bit WAV files, so make sure to convert your input before running the tool.
|
||||||
For example, you can use `ffmpeg` like this:
|
For example, you can use `ffmpeg` like this:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@ -257,6 +139,20 @@ make -j large-v3-turbo
|
|||||||
| medium | 1.5 GiB | ~2.1 GB |
|
| medium | 1.5 GiB | ~2.1 GB |
|
||||||
| large | 2.9 GiB | ~3.9 GB |
|
| large | 2.9 GiB | ~3.9 GB |
|
||||||
|
|
||||||
|
## POWER VSX Intrinsics
|
||||||
|
|
||||||
|
`whisper.cpp` supports POWER architectures and includes code which
|
||||||
|
significantly speeds operation on Linux running on POWER9/10, making it
|
||||||
|
capable of faster-than-realtime transcription on underclocked Raptor
|
||||||
|
Talos II. Ensure you have a BLAS package installed, and replace the
|
||||||
|
standard cmake setup with:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# build with GGML_BLAS defined
|
||||||
|
cmake -B build -DGGML_BLAS=1
|
||||||
|
cmake --build build --config Release
|
||||||
|
./build/bin/whisper-cli [ .. etc .. ]
|
||||||
|
|
||||||
## Quantization
|
## Quantization
|
||||||
|
|
||||||
`whisper.cpp` supports integer quantization of the Whisper `ggml` models.
|
`whisper.cpp` supports integer quantization of the Whisper `ggml` models.
|
||||||
@ -271,7 +167,7 @@ cmake --build build --config Release
|
|||||||
./build/bin/quantize models/ggml-base.en.bin models/ggml-base.en-q5_0.bin q5_0
|
./build/bin/quantize models/ggml-base.en.bin models/ggml-base.en-q5_0.bin q5_0
|
||||||
|
|
||||||
# run the examples as usual, specifying the quantized model file
|
# run the examples as usual, specifying the quantized model file
|
||||||
./build/bin/main -m models/ggml-base.en-q5_0.bin ./samples/gb0.wav
|
./build/bin/whisper-cli -m models/ggml-base.en-q5_0.bin ./samples/gb0.wav
|
||||||
```
|
```
|
||||||
|
|
||||||
## Core ML support
|
## Core ML support
|
||||||
@ -313,7 +209,7 @@ speed-up - more than x3 faster compared with CPU-only execution. Here are the in
|
|||||||
- Run the examples as usual. For example:
|
- Run the examples as usual. For example:
|
||||||
|
|
||||||
```text
|
```text
|
||||||
$ ./main -m models/ggml-base.en.bin -f samples/jfk.wav
|
$ ./build/bin/whisper-cli -m models/ggml-base.en.bin -f samples/jfk.wav
|
||||||
|
|
||||||
...
|
...
|
||||||
|
|
||||||
@ -397,7 +293,7 @@ This can result in significant speedup in encoder performance. Here are the inst
|
|||||||
- Run the examples as usual. For example:
|
- Run the examples as usual. For example:
|
||||||
|
|
||||||
```text
|
```text
|
||||||
$ ./main -m models/ggml-base.en.bin -f samples/jfk.wav
|
$ ./build/bin/whisper-cli -m models/ggml-base.en.bin -f samples/jfk.wav
|
||||||
|
|
||||||
...
|
...
|
||||||
|
|
||||||
@ -414,7 +310,7 @@ This can result in significant speedup in encoder performance. Here are the inst
|
|||||||
The first time run on an OpenVINO device is slow, since the OpenVINO framework will compile the IR (Intermediate Representation) model to a device-specific 'blob'. This device-specific blob will get
|
The first time run on an OpenVINO device is slow, since the OpenVINO framework will compile the IR (Intermediate Representation) model to a device-specific 'blob'. This device-specific blob will get
|
||||||
cached for the next run.
|
cached for the next run.
|
||||||
|
|
||||||
For more information about the Core ML implementation please refer to PR [#1037](https://github.com/ggerganov/whisper.cpp/pull/1037).
|
For more information about the OpenVINO implementation please refer to PR [#1037](https://github.com/ggerganov/whisper.cpp/pull/1037).
|
||||||
|
|
||||||
## NVIDIA GPU support
|
## NVIDIA GPU support
|
||||||
|
|
||||||
@ -473,7 +369,7 @@ cmake --build build -j --config Release
|
|||||||
Run the inference examples as usual, for example:
|
Run the inference examples as usual, for example:
|
||||||
|
|
||||||
```
|
```
|
||||||
./build/bin/main -f samples/jfk.wav -m models/ggml-base.en.bin -t 8
|
./build/bin/whisper-cli -f samples/jfk.wav -m models/ggml-base.en.bin -t 8
|
||||||
```
|
```
|
||||||
|
|
||||||
*Notes:*
|
*Notes:*
|
||||||
@ -527,89 +423,6 @@ For detailed instructions on how to use Conan, please refer to the [Conan docume
|
|||||||
|
|
||||||
- Inference only
|
- Inference only
|
||||||
|
|
||||||
## Another example
|
|
||||||
|
|
||||||
Here is another example of transcribing a [3:24 min speech](https://upload.wikimedia.org/wikipedia/commons/1/1f/George_W_Bush_Columbia_FINAL.ogg)
|
|
||||||
in about half a minute on a MacBook M1 Pro, using `medium.en` model:
|
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary>Expand to see the result</summary>
|
|
||||||
|
|
||||||
```text
|
|
||||||
$ ./main -m models/ggml-medium.en.bin -f samples/gb1.wav -t 8
|
|
||||||
|
|
||||||
whisper_init_from_file: loading model from 'models/ggml-medium.en.bin'
|
|
||||||
whisper_model_load: loading model
|
|
||||||
whisper_model_load: n_vocab = 51864
|
|
||||||
whisper_model_load: n_audio_ctx = 1500
|
|
||||||
whisper_model_load: n_audio_state = 1024
|
|
||||||
whisper_model_load: n_audio_head = 16
|
|
||||||
whisper_model_load: n_audio_layer = 24
|
|
||||||
whisper_model_load: n_text_ctx = 448
|
|
||||||
whisper_model_load: n_text_state = 1024
|
|
||||||
whisper_model_load: n_text_head = 16
|
|
||||||
whisper_model_load: n_text_layer = 24
|
|
||||||
whisper_model_load: n_mels = 80
|
|
||||||
whisper_model_load: f16 = 1
|
|
||||||
whisper_model_load: type = 4
|
|
||||||
whisper_model_load: mem required = 1720.00 MB (+ 43.00 MB per decoder)
|
|
||||||
whisper_model_load: kv self size = 42.00 MB
|
|
||||||
whisper_model_load: kv cross size = 140.62 MB
|
|
||||||
whisper_model_load: adding 1607 extra tokens
|
|
||||||
whisper_model_load: model ctx = 1462.35 MB
|
|
||||||
whisper_model_load: model size = 1462.12 MB
|
|
||||||
|
|
||||||
system_info: n_threads = 8 / 10 | AVX = 0 | AVX2 = 0 | AVX512 = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 |
|
|
||||||
|
|
||||||
main: processing 'samples/gb1.wav' (3179750 samples, 198.7 sec), 8 threads, 1 processors, lang = en, task = transcribe, timestamps = 1 ...
|
|
||||||
|
|
||||||
|
|
||||||
[00:00:00.000 --> 00:00:08.000] My fellow Americans, this day has brought terrible news and great sadness to our country.
|
|
||||||
[00:00:08.000 --> 00:00:17.000] At nine o'clock this morning, Mission Control in Houston lost contact with our Space Shuttle Columbia.
|
|
||||||
[00:00:17.000 --> 00:00:23.000] A short time later, debris was seen falling from the skies above Texas.
|
|
||||||
[00:00:23.000 --> 00:00:29.000] The Columbia's lost. There are no survivors.
|
|
||||||
[00:00:29.000 --> 00:00:32.000] On board was a crew of seven.
|
|
||||||
[00:00:32.000 --> 00:00:39.000] Colonel Rick Husband, Lieutenant Colonel Michael Anderson, Commander Laurel Clark,
|
|
||||||
[00:00:39.000 --> 00:00:48.000] Captain David Brown, Commander William McCool, Dr. Kultna Shavla, and Ilan Ramon,
|
|
||||||
[00:00:48.000 --> 00:00:52.000] a colonel in the Israeli Air Force.
|
|
||||||
[00:00:52.000 --> 00:00:58.000] These men and women assumed great risk in the service to all humanity.
|
|
||||||
[00:00:58.000 --> 00:01:03.000] In an age when space flight has come to seem almost routine,
|
|
||||||
[00:01:03.000 --> 00:01:07.000] it is easy to overlook the dangers of travel by rocket
|
|
||||||
[00:01:07.000 --> 00:01:12.000] and the difficulties of navigating the fierce outer atmosphere of the Earth.
|
|
||||||
[00:01:12.000 --> 00:01:18.000] These astronauts knew the dangers, and they faced them willingly,
|
|
||||||
[00:01:18.000 --> 00:01:23.000] knowing they had a high and noble purpose in life.
|
|
||||||
[00:01:23.000 --> 00:01:31.000] Because of their courage and daring and idealism, we will miss them all the more.
|
|
||||||
[00:01:31.000 --> 00:01:36.000] All Americans today are thinking as well of the families of these men and women
|
|
||||||
[00:01:36.000 --> 00:01:40.000] who have been given this sudden shock and grief.
|
|
||||||
[00:01:40.000 --> 00:01:45.000] You're not alone. Our entire nation grieves with you,
|
|
||||||
[00:01:45.000 --> 00:01:52.000] and those you love will always have the respect and gratitude of this country.
|
|
||||||
[00:01:52.000 --> 00:01:56.000] The cause in which they died will continue.
|
|
||||||
[00:01:56.000 --> 00:02:04.000] Mankind is led into the darkness beyond our world by the inspiration of discovery
|
|
||||||
[00:02:04.000 --> 00:02:11.000] and the longing to understand. Our journey into space will go on.
|
|
||||||
[00:02:11.000 --> 00:02:16.000] In the skies today, we saw destruction and tragedy.
|
|
||||||
[00:02:16.000 --> 00:02:22.000] Yet farther than we can see, there is comfort and hope.
|
|
||||||
[00:02:22.000 --> 00:02:29.000] In the words of the prophet Isaiah, "Lift your eyes and look to the heavens
|
|
||||||
[00:02:29.000 --> 00:02:35.000] who created all these. He who brings out the starry hosts one by one
|
|
||||||
[00:02:35.000 --> 00:02:39.000] and calls them each by name."
|
|
||||||
[00:02:39.000 --> 00:02:46.000] Because of His great power and mighty strength, not one of them is missing.
|
|
||||||
[00:02:46.000 --> 00:02:55.000] The same Creator who names the stars also knows the names of the seven souls we mourn today.
|
|
||||||
[00:02:55.000 --> 00:03:01.000] The crew of the shuttle Columbia did not return safely to earth,
|
|
||||||
[00:03:01.000 --> 00:03:05.000] yet we can pray that all are safely home.
|
|
||||||
[00:03:05.000 --> 00:03:13.000] May God bless the grieving families, and may God continue to bless America.
|
|
||||||
[00:03:13.000 --> 00:03:19.000] [Silence]
|
|
||||||
|
|
||||||
|
|
||||||
whisper_print_timings: fallbacks = 1 p / 0 h
|
|
||||||
whisper_print_timings: load time = 569.03 ms
|
|
||||||
whisper_print_timings: mel time = 146.85 ms
|
|
||||||
whisper_print_timings: sample time = 238.66 ms / 553 runs ( 0.43 ms per run)
|
|
||||||
whisper_print_timings: encode time = 18665.10 ms / 9 runs ( 2073.90 ms per run)
|
|
||||||
whisper_print_timings: decode time = 13090.93 ms / 549 runs ( 23.85 ms per run)
|
|
||||||
whisper_print_timings: total time = 32733.52 ms
|
|
||||||
```
|
|
||||||
|
|
||||||
</details>
|
|
||||||
|
|
||||||
## Real-time audio input example
|
## Real-time audio input example
|
||||||
|
|
||||||
This is a naive example of performing real-time inference on audio from your microphone.
|
This is a naive example of performing real-time inference on audio from your microphone.
|
||||||
@ -617,9 +430,9 @@ The [stream](examples/stream) tool samples the audio every half a second and run
|
|||||||
More info is available in [issue #10](https://github.com/ggerganov/whisper.cpp/issues/10).
|
More info is available in [issue #10](https://github.com/ggerganov/whisper.cpp/issues/10).
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cmake -B build
|
cmake -B build -DWHISPER_SDL2=ON
|
||||||
cmake --build build --config Release
|
cmake --build build --config Release
|
||||||
./build/bin/stream -m ./models/ggml-base.en.bin -t 8 --step 500 --length 5000
|
./build/bin/whisper-stream -m ./models/ggml-base.en.bin -t 8 --step 500 --length 5000
|
||||||
```
|
```
|
||||||
|
|
||||||
https://user-images.githubusercontent.com/1991296/194935793-76afede7-cfa8-48d8-a80f-28ba83be7d09.mp4
|
https://user-images.githubusercontent.com/1991296/194935793-76afede7-cfa8-48d8-a80f-28ba83be7d09.mp4
|
||||||
@ -630,7 +443,7 @@ Adding the `--print-colors` argument will print the transcribed text using an ex
|
|||||||
to highlight words with high or low confidence:
|
to highlight words with high or low confidence:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./main -m models/ggml-base.en.bin -f samples/gb0.wav --print-colors
|
./build/bin/whisper-cli -m models/ggml-base.en.bin -f samples/gb0.wav --print-colors
|
||||||
```
|
```
|
||||||
|
|
||||||
<img width="965" alt="image" src="https://user-images.githubusercontent.com/1991296/197356445-311c8643-9397-4e5e-b46e-0b4b4daa2530.png">
|
<img width="965" alt="image" src="https://user-images.githubusercontent.com/1991296/197356445-311c8643-9397-4e5e-b46e-0b4b4daa2530.png">
|
||||||
@ -640,7 +453,7 @@ to highlight words with high or low confidence:
|
|||||||
For example, to limit the line length to a maximum of 16 characters, simply add `-ml 16`:
|
For example, to limit the line length to a maximum of 16 characters, simply add `-ml 16`:
|
||||||
|
|
||||||
```text
|
```text
|
||||||
$ ./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 16
|
$ ./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 16
|
||||||
|
|
||||||
whisper_model_load: loading model from './models/ggml-base.en.bin'
|
whisper_model_load: loading model from './models/ggml-base.en.bin'
|
||||||
...
|
...
|
||||||
@ -664,7 +477,7 @@ main: processing './samples/jfk.wav' (176000 samples, 11.0 sec), 4 threads, 1 pr
|
|||||||
The `--max-len` argument can be used to obtain word-level timestamps. Simply use `-ml 1`:
|
The `--max-len` argument can be used to obtain word-level timestamps. Simply use `-ml 1`:
|
||||||
|
|
||||||
```text
|
```text
|
||||||
$ ./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 1
|
$ ./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 1
|
||||||
|
|
||||||
whisper_model_load: loading model from './models/ggml-base.en.bin'
|
whisper_model_load: loading model from './models/ggml-base.en.bin'
|
||||||
...
|
...
|
||||||
@ -711,7 +524,7 @@ Sample usage:
|
|||||||
./models/download-ggml-model.sh small.en-tdrz
|
./models/download-ggml-model.sh small.en-tdrz
|
||||||
|
|
||||||
# run as usual, adding the "-tdrz" command-line argument
|
# run as usual, adding the "-tdrz" command-line argument
|
||||||
./main -f ./samples/a13.wav -m ./models/ggml-small.en-tdrz.bin -tdrz
|
./build/bin/whisper-cli -f ./samples/a13.wav -m ./models/ggml-small.en-tdrz.bin -tdrz
|
||||||
...
|
...
|
||||||
main: processing './samples/a13.wav' (480000 samples, 30.0 sec), 4 threads, 1 processors, lang = en, task = transcribe, tdrz = 1, timestamps = 1 ...
|
main: processing './samples/a13.wav' (480000 samples, 30.0 sec), 4 threads, 1 processors, lang = en, task = transcribe, tdrz = 1, timestamps = 1 ...
|
||||||
...
|
...
|
||||||
@ -728,14 +541,14 @@ main: processing './samples/a13.wav' (480000 samples, 30.0 sec), 4 threads, 1 pr
|
|||||||
|
|
||||||
## Karaoke-style movie generation (experimental)
|
## Karaoke-style movie generation (experimental)
|
||||||
|
|
||||||
The [main](examples/main) example provides support for output of karaoke-style movies, where the
|
The [whisper-cli](examples/cli) example provides support for output of karaoke-style movies, where the
|
||||||
currently pronounced word is highlighted. Use the `-wts` argument and run the generated bash script.
|
currently pronounced word is highlighted. Use the `-wts` argument and run the generated bash script.
|
||||||
This requires to have `ffmpeg` installed.
|
This requires to have `ffmpeg` installed.
|
||||||
|
|
||||||
Here are a few _"typical"_ examples:
|
Here are a few _"typical"_ examples:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -owts
|
./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -owts
|
||||||
source ./samples/jfk.wav.wts
|
source ./samples/jfk.wav.wts
|
||||||
ffplay ./samples/jfk.wav.mp4
|
ffplay ./samples/jfk.wav.mp4
|
||||||
```
|
```
|
||||||
@ -745,7 +558,7 @@ https://user-images.githubusercontent.com/1991296/199337465-dbee4b5e-9aeb-48a3-b
|
|||||||
---
|
---
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./main -m ./models/ggml-base.en.bin -f ./samples/mm0.wav -owts
|
./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/mm0.wav -owts
|
||||||
source ./samples/mm0.wav.wts
|
source ./samples/mm0.wav.wts
|
||||||
ffplay ./samples/mm0.wav.mp4
|
ffplay ./samples/mm0.wav.mp4
|
||||||
```
|
```
|
||||||
@ -755,7 +568,7 @@ https://user-images.githubusercontent.com/1991296/199337504-cc8fd233-0cb7-4920-9
|
|||||||
---
|
---
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./main -m ./models/ggml-base.en.bin -f ./samples/gb0.wav -owts
|
./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/gb0.wav -owts
|
||||||
source ./samples/gb0.wav.wts
|
source ./samples/gb0.wav.wts
|
||||||
ffplay ./samples/gb0.wav.mp4
|
ffplay ./samples/gb0.wav.mp4
|
||||||
```
|
```
|
||||||
@ -780,7 +593,7 @@ https://user-images.githubusercontent.com/1991296/223206245-2d36d903-cf8e-4f09-8
|
|||||||
## Benchmarks
|
## Benchmarks
|
||||||
|
|
||||||
In order to have an objective comparison of the performance of the inference across different system configurations,
|
In order to have an objective comparison of the performance of the inference across different system configurations,
|
||||||
use the [bench](examples/bench) tool. The tool simply runs the Encoder part of the model and prints how much time it
|
use the [whisper-bench](examples/bench) tool. The tool simply runs the Encoder part of the model and prints how much time it
|
||||||
took to execute it. The results are summarized in the following Github issue:
|
took to execute it. The results are summarized in the following Github issue:
|
||||||
|
|
||||||
[Benchmark results](https://github.com/ggerganov/whisper.cpp/issues/89)
|
[Benchmark results](https://github.com/ggerganov/whisper.cpp/issues/89)
|
||||||
@ -843,13 +656,12 @@ Some of the examples are even ported to run in the browser using WebAssembly. Ch
|
|||||||
|
|
||||||
| Example | Web | Description |
|
| Example | Web | Description |
|
||||||
| --------------------------------------------------- | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
|
| --------------------------------------------------- | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| [main](examples/main) | [whisper.wasm](examples/whisper.wasm) | Tool for translating and transcribing audio using Whisper |
|
| [whisper-cli](examples/cli) | [whisper.wasm](examples/whisper.wasm) | Tool for translating and transcribing audio using Whisper |
|
||||||
| [bench](examples/bench) | [bench.wasm](examples/bench.wasm) | Benchmark the performance of Whisper on your machine |
|
| [whisper-bench](examples/bench) | [bench.wasm](examples/bench.wasm) | Benchmark the performance of Whisper on your machine |
|
||||||
| [stream](examples/stream) | [stream.wasm](examples/stream.wasm) | Real-time transcription of raw microphone capture |
|
| [whisper-stream](examples/stream) | [stream.wasm](examples/stream.wasm) | Real-time transcription of raw microphone capture |
|
||||||
| [command](examples/command) | [command.wasm](examples/command.wasm) | Basic voice assistant example for receiving voice commands from the mic |
|
| [whisper-command](examples/command) | [command.wasm](examples/command.wasm) | Basic voice assistant example for receiving voice commands from the mic |
|
||||||
| [wchess](examples/wchess) | [wchess.wasm](examples/wchess) | Voice-controlled chess |
|
| [whisper-server](examples/server) | | HTTP transcription server with OAI-like API |
|
||||||
| [talk](examples/talk) | [talk.wasm](examples/talk.wasm) | Talk with a GPT-2 bot |
|
| [whisper-talk-llama](examples/talk-llama) | | Talk with a LLaMA bot |
|
||||||
| [talk-llama](examples/talk-llama) | | Talk with a LLaMA bot |
|
|
||||||
| [whisper.objc](examples/whisper.objc) | | iOS mobile application using whisper.cpp |
|
| [whisper.objc](examples/whisper.objc) | | iOS mobile application using whisper.cpp |
|
||||||
| [whisper.swiftui](examples/whisper.swiftui) | | SwiftUI iOS / macOS application using whisper.cpp |
|
| [whisper.swiftui](examples/whisper.swiftui) | | SwiftUI iOS / macOS application using whisper.cpp |
|
||||||
| [whisper.android](examples/whisper.android) | | Android mobile application using whisper.cpp |
|
| [whisper.android](examples/whisper.android) | | Android mobile application using whisper.cpp |
|
||||||
@ -857,7 +669,7 @@ Some of the examples are even ported to run in the browser using WebAssembly. Ch
|
|||||||
| [generate-karaoke.sh](examples/generate-karaoke.sh) | | Helper script to easily [generate a karaoke video](https://youtu.be/uj7hVta4blM) of raw audio capture |
|
| [generate-karaoke.sh](examples/generate-karaoke.sh) | | Helper script to easily [generate a karaoke video](https://youtu.be/uj7hVta4blM) of raw audio capture |
|
||||||
| [livestream.sh](examples/livestream.sh) | | [Livestream audio transcription](https://github.com/ggerganov/whisper.cpp/issues/185) |
|
| [livestream.sh](examples/livestream.sh) | | [Livestream audio transcription](https://github.com/ggerganov/whisper.cpp/issues/185) |
|
||||||
| [yt-wsp.sh](examples/yt-wsp.sh) | | Download + transcribe and/or translate any VOD [(original)](https://gist.github.com/DaniruKun/96f763ec1a037cc92fe1a059b643b818) |
|
| [yt-wsp.sh](examples/yt-wsp.sh) | | Download + transcribe and/or translate any VOD [(original)](https://gist.github.com/DaniruKun/96f763ec1a037cc92fe1a059b643b818) |
|
||||||
| [server](examples/server) | | HTTP transcription server with OAI-like API |
|
| [wchess](examples/wchess) | [wchess.wasm](examples/wchess) | Voice-controlled chess |
|
||||||
|
|
||||||
## [Discussions](https://github.com/ggerganov/whisper.cpp/discussions)
|
## [Discussions](https://github.com/ggerganov/whisper.cpp/discussions)
|
||||||
|
|
||||||
|
@ -1,5 +0,0 @@
|
|||||||
module whisper [system] {
|
|
||||||
header "whisper.h"
|
|
||||||
link "whisper"
|
|
||||||
export *
|
|
||||||
}
|
|
@ -1,4 +0,0 @@
|
|||||||
#pragma once
|
|
||||||
|
|
||||||
#include <whisper.h>
|
|
||||||
|
|
@ -9,22 +9,23 @@ import (
|
|||||||
// ContextForSignal returns a context object which is cancelled when a signal
|
// ContextForSignal returns a context object which is cancelled when a signal
|
||||||
// is received. It returns nil if no signal parameter is provided
|
// is received. It returns nil if no signal parameter is provided
|
||||||
func ContextForSignal(signals ...os.Signal) context.Context {
|
func ContextForSignal(signals ...os.Signal) context.Context {
|
||||||
if len(signals) == 0 {
|
if len(signals) == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
ch := make(chan os.Signal)
|
ch := make(chan os.Signal, 1) // Buffered channel with space for 1 signal
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
|
||||||
// Send message on channel when signal received
|
// Send message on channel when signal received
|
||||||
signal.Notify(ch, signals...)
|
signal.Notify(ch, signals...)
|
||||||
|
|
||||||
// When any signal received, call cancel
|
// When any signal is received, call cancel
|
||||||
go func() {
|
go func() {
|
||||||
<-ch
|
<-ch
|
||||||
cancel()
|
cancel()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// Return success
|
// Return success
|
||||||
return ctx
|
return ctx
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -9,6 +9,7 @@ import (
|
|||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
@ -17,14 +18,27 @@ import (
|
|||||||
// CONSTANTS
|
// CONSTANTS
|
||||||
|
|
||||||
const (
|
const (
|
||||||
srcUrl = "https://huggingface.co/ggerganov/whisper.cpp/resolve/main" // The location of the models
|
srcUrl = "https://huggingface.co/ggerganov/whisper.cpp/resolve/main/" // The location of the models
|
||||||
srcExt = ".bin" // Filename extension
|
srcExt = ".bin" // Filename extension
|
||||||
bufSize = 1024 * 64 // Size of the buffer used for downloading the model
|
bufSize = 1024 * 64 // Size of the buffer used for downloading the model
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// The models which will be downloaded, if no model is specified as an argument
|
// The models which will be downloaded, if no model is specified as an argument
|
||||||
modelNames = []string{"ggml-tiny.en", "ggml-tiny", "ggml-base.en", "ggml-base", "ggml-small.en", "ggml-small", "ggml-medium.en", "ggml-medium", "ggml-large-v1", "ggml-large-v2", "ggml-large-v3", "large-v3-turbo"}
|
modelNames = []string{
|
||||||
|
"tiny", "tiny-q5_1", "tiny-q8_0",
|
||||||
|
"tiny.en", "tiny.en-q5_1", "tiny.en-q8_0",
|
||||||
|
"base", "base-q5_1", "base-q8_0",
|
||||||
|
"base.en", "base.en-q5_1", "base.en-q8_0",
|
||||||
|
"small", "small-q5_1", "small-q8_0",
|
||||||
|
"small.en", "small.en-q5_1", "small.en-q8_0",
|
||||||
|
"medium", "medium-q5_0", "medium-q8_0",
|
||||||
|
"medium.en", "medium.en-q5_0", "medium.en-q8_0",
|
||||||
|
"large-v1",
|
||||||
|
"large-v2", "large-v2-q5_0", "large-v2-q8_0",
|
||||||
|
"large-v3", "large-v3-q5_0",
|
||||||
|
"large-v3-turbo", "large-v3-turbo-q5_0", "large-v3-turbo-q8_0",
|
||||||
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -44,7 +58,25 @@ var (
|
|||||||
func main() {
|
func main() {
|
||||||
flag.Usage = func() {
|
flag.Usage = func() {
|
||||||
name := filepath.Base(flag.CommandLine.Name())
|
name := filepath.Base(flag.CommandLine.Name())
|
||||||
fmt.Fprintf(flag.CommandLine.Output(), "Usage: %s [options] <model>\n\n", name)
|
fmt.Fprintf(flag.CommandLine.Output(), `
|
||||||
|
Usage: %s [options] [<model>...]
|
||||||
|
|
||||||
|
Options:
|
||||||
|
-out string Specify the output folder where models will be saved.
|
||||||
|
Default: Current working directory.
|
||||||
|
-timeout duration Set the maximum duration for downloading a model.
|
||||||
|
Example: 10m, 1h (default: 30m0s).
|
||||||
|
-quiet Suppress all output except errors.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
1. Download a specific model:
|
||||||
|
%s -out ./models tiny-q8_0
|
||||||
|
|
||||||
|
2. Download all models:
|
||||||
|
%s -out ./models
|
||||||
|
|
||||||
|
`, name, name, name)
|
||||||
|
|
||||||
flag.PrintDefaults()
|
flag.PrintDefaults()
|
||||||
}
|
}
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
@ -114,23 +146,87 @@ func GetOut() (string, error) {
|
|||||||
// GetModels returns the list of models to download
|
// GetModels returns the list of models to download
|
||||||
func GetModels() []string {
|
func GetModels() []string {
|
||||||
if flag.NArg() == 0 {
|
if flag.NArg() == 0 {
|
||||||
return modelNames
|
fmt.Println("No model specified.")
|
||||||
} else {
|
fmt.Println("Preparing to download all models...")
|
||||||
return flag.Args()
|
|
||||||
|
// Calculate total download size
|
||||||
|
fmt.Println("Calculating total download size...")
|
||||||
|
totalSize, err := CalculateTotalDownloadSize(modelNames)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("Error calculating download sizes:", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println("View available models: https://huggingface.co/ggerganov/whisper.cpp/tree/main")
|
||||||
|
fmt.Printf("Total download size: %.2f GB\n", float64(totalSize)/(1024*1024*1024))
|
||||||
|
fmt.Println("Would you like to download all models? (y/N)")
|
||||||
|
|
||||||
|
// Prompt for user input
|
||||||
|
var response string
|
||||||
|
fmt.Scanln(&response)
|
||||||
|
if response != "y" && response != "Y" {
|
||||||
|
fmt.Println("Aborting. Specify a model to download.")
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
return modelNames // Return all models if confirmed
|
||||||
}
|
}
|
||||||
|
return flag.Args() // Return specific models if arguments are provided
|
||||||
|
}
|
||||||
|
|
||||||
|
func CalculateTotalDownloadSize(models []string) (int64, error) {
|
||||||
|
var totalSize int64
|
||||||
|
client := http.Client{}
|
||||||
|
|
||||||
|
for _, model := range models {
|
||||||
|
modelURL, err := URLForModel(model)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Issue a HEAD request to get the file size
|
||||||
|
req, err := http.NewRequest("HEAD", modelURL, nil)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
fmt.Printf("Warning: Unable to fetch size for %s (HTTP %d)\n", model, resp.StatusCode)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
size := resp.ContentLength
|
||||||
|
totalSize += size
|
||||||
|
}
|
||||||
|
return totalSize, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// URLForModel returns the URL for the given model on huggingface.co
|
// URLForModel returns the URL for the given model on huggingface.co
|
||||||
func URLForModel(model string) (string, error) {
|
func URLForModel(model string) (string, error) {
|
||||||
|
// Ensure "ggml-" prefix is added only once
|
||||||
|
if !strings.HasPrefix(model, "ggml-") {
|
||||||
|
model = "ggml-" + model
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure ".bin" extension is added only once
|
||||||
if filepath.Ext(model) != srcExt {
|
if filepath.Ext(model) != srcExt {
|
||||||
model += srcExt
|
model += srcExt
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Parse the base URL
|
||||||
url, err := url.Parse(srcUrl)
|
url, err := url.Parse(srcUrl)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
} else {
|
|
||||||
url.Path = filepath.Join(url.Path, model)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Ensure no trailing slash in the base URL
|
||||||
|
url.Path = fmt.Sprintf("%s/%s", strings.TrimSuffix(url.Path, "/"), model)
|
||||||
return url.String(), nil
|
return url.String(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -181,11 +181,11 @@ public class WhisperFullParams extends Structure {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/** Flag to suppress non-speech tokens. */
|
/** Flag to suppress non-speech tokens. */
|
||||||
public CBool suppress_non_speech_tokens;
|
public CBool suppress_nst;
|
||||||
|
|
||||||
/** Flag to suppress non-speech tokens. */
|
/** Flag to suppress non-speech tokens. */
|
||||||
public void suppressNonSpeechTokens(boolean enable) {
|
public void suppressNonSpeechTokens(boolean enable) {
|
||||||
suppress_non_speech_tokens = enable ? CBool.TRUE : CBool.FALSE;
|
suppress_nst = enable ? CBool.TRUE : CBool.FALSE;
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Initial decoding temperature. */
|
/** Initial decoding temperature. */
|
||||||
@ -315,7 +315,7 @@ public class WhisperFullParams extends Structure {
|
|||||||
"print_special", "print_progress", "print_realtime", "print_timestamps", "token_timestamps",
|
"print_special", "print_progress", "print_realtime", "print_timestamps", "token_timestamps",
|
||||||
"thold_pt", "thold_ptsum", "max_len", "split_on_word", "max_tokens", "audio_ctx",
|
"thold_pt", "thold_ptsum", "max_len", "split_on_word", "max_tokens", "audio_ctx",
|
||||||
"tdrz_enable", "suppress_regex", "initial_prompt", "prompt_tokens", "prompt_n_tokens", "language", "detect_language",
|
"tdrz_enable", "suppress_regex", "initial_prompt", "prompt_tokens", "prompt_n_tokens", "language", "detect_language",
|
||||||
"suppress_blank", "suppress_non_speech_tokens", "temperature", "max_initial_ts", "length_penalty",
|
"suppress_blank", "suppress_nst", "temperature", "max_initial_ts", "length_penalty",
|
||||||
"temperature_inc", "entropy_thold", "logprob_thold", "no_speech_thold", "greedy", "beam_search",
|
"temperature_inc", "entropy_thold", "logprob_thold", "no_speech_thold", "greedy", "beam_search",
|
||||||
"new_segment_callback", "new_segment_callback_user_data",
|
"new_segment_callback", "new_segment_callback_user_data",
|
||||||
"progress_callback", "progress_callback_user_data",
|
"progress_callback", "progress_callback_user_data",
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"name": "whisper.cpp",
|
"name": "whisper.cpp",
|
||||||
"version": "1.7.3",
|
"version": "1.7.4",
|
||||||
"description": "Whisper speech recognition",
|
"description": "Whisper speech recognition",
|
||||||
"main": "whisper.js",
|
"main": "whisper.js",
|
||||||
"scripts": {
|
"scripts": {
|
||||||
|
4
bindings/ruby/.gitignore
vendored
4
bindings/ruby/.gitignore
vendored
@ -1,5 +1,3 @@
|
|||||||
LICENSE
|
LICENSE
|
||||||
pkg/
|
pkg/
|
||||||
lib/whisper.so
|
lib/whisper.*
|
||||||
lib/whisper.bundle
|
|
||||||
lib/whisper.dll
|
|
||||||
|
@ -24,14 +24,15 @@ require "whisper"
|
|||||||
|
|
||||||
whisper = Whisper::Context.new("base")
|
whisper = Whisper::Context.new("base")
|
||||||
|
|
||||||
params = Whisper::Params.new
|
params = Whisper::Params.new(
|
||||||
params.language = "en"
|
language: "en",
|
||||||
params.offset = 10_000
|
offset: 10_000,
|
||||||
params.duration = 60_000
|
duration: 60_000,
|
||||||
params.max_text_tokens = 300
|
max_text_tokens: 300,
|
||||||
params.translate = true
|
translate: true,
|
||||||
params.print_timestamps = false
|
print_timestamps: false,
|
||||||
params.initial_prompt = "Initial prompt here."
|
initial_prompt: "Initial prompt here."
|
||||||
|
)
|
||||||
|
|
||||||
whisper.transcribe("path/to/audio.wav", params) do |whole_text|
|
whisper.transcribe("path/to/audio.wav", params) do |whole_text|
|
||||||
puts whole_text
|
puts whole_text
|
||||||
@ -60,10 +61,10 @@ You also can use shorthand for pre-converted models:
|
|||||||
whisper = Whisper::Context.new("base.en")
|
whisper = Whisper::Context.new("base.en")
|
||||||
```
|
```
|
||||||
|
|
||||||
You can see the list of prepared model names by `Whisper::Model.preconverted_models.keys`:
|
You can see the list of prepared model names by `Whisper::Model.pre_converted_models.keys`:
|
||||||
|
|
||||||
```ruby
|
```ruby
|
||||||
puts Whisper::Model.preconverted_model_names
|
puts Whisper::Model.pre_converted_models.keys
|
||||||
# tiny
|
# tiny
|
||||||
# tiny.en
|
# tiny.en
|
||||||
# tiny-q5_1
|
# tiny-q5_1
|
||||||
@ -87,8 +88,9 @@ whisper = Whisper::Context.new("path/to/your/model.bin")
|
|||||||
Or, you can download model files:
|
Or, you can download model files:
|
||||||
|
|
||||||
```ruby
|
```ruby
|
||||||
model_uri = Whisper::Model::URI.new("http://example.net/uri/of/your/model.bin")
|
whisper = Whisper::Context.new("https://example.net/uri/of/your/model.bin")
|
||||||
whisper = Whisper::Context.new(model_uri)
|
# Or
|
||||||
|
whisper = Whisper::Context.new(URI("https://example.net/uri/of/your/model.bin"))
|
||||||
```
|
```
|
||||||
|
|
||||||
See [models][] page for details.
|
See [models][] page for details.
|
||||||
@ -112,18 +114,18 @@ def format_time(time_ms)
|
|||||||
"%02d:%02d:%02d.%03d" % [hour, min, sec, decimal_part]
|
"%02d:%02d:%02d.%03d" % [hour, min, sec, decimal_part]
|
||||||
end
|
end
|
||||||
|
|
||||||
whisper.transcribe("path/to/audio.wav", params)
|
whisper
|
||||||
|
.transcribe("path/to/audio.wav", params)
|
||||||
whisper.each_segment.with_index do |segment, index|
|
.each_segment.with_index do |segment, index|
|
||||||
line = "[%{nth}: %{st} --> %{ed}] %{text}" % {
|
line = "[%{nth}: %{st} --> %{ed}] %{text}" % {
|
||||||
nth: index + 1,
|
nth: index + 1,
|
||||||
st: format_time(segment.start_time),
|
st: format_time(segment.start_time),
|
||||||
ed: format_time(segment.end_time),
|
ed: format_time(segment.end_time),
|
||||||
text: segment.text
|
text: segment.text
|
||||||
}
|
}
|
||||||
line << " (speaker turned)" if segment.speaker_next_turn?
|
line << " (speaker turned)" if segment.speaker_next_turn?
|
||||||
puts line
|
puts line
|
||||||
end
|
end
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -214,13 +216,25 @@ reader = WaveFile::Reader.new("path/to/audio.wav", WaveFile::Format.new(:mono, :
|
|||||||
samples = reader.enum_for(:each_buffer).map(&:samples).flatten
|
samples = reader.enum_for(:each_buffer).map(&:samples).flatten
|
||||||
|
|
||||||
whisper = Whisper::Context.new("base")
|
whisper = Whisper::Context.new("base")
|
||||||
whisper.full(Whisper::Params.new, samples)
|
whisper
|
||||||
whisper.each_segment do |segment|
|
.full(Whisper::Params.new, samples)
|
||||||
puts segment.text
|
.each_segment do |segment|
|
||||||
end
|
puts segment.text
|
||||||
|
end
|
||||||
```
|
```
|
||||||
|
|
||||||
The second argument `samples` may be an array, an object with `length` method, or a MemoryView. If you can prepare audio data as C array and export it as a MemoryView, whispercpp accepts and works with it with zero copy.
|
The second argument `samples` may be an array, an object with `length` and `each` method, or a MemoryView. If you can prepare audio data as C array and export it as a MemoryView, whispercpp accepts and works with it with zero copy.
|
||||||
|
|
||||||
|
Development
|
||||||
|
-----------
|
||||||
|
|
||||||
|
% git clone https://github.com/ggerganov/whisper.cpp.git
|
||||||
|
% cd whisper.cpp/bindings/ruby
|
||||||
|
% rake test
|
||||||
|
|
||||||
|
First call of `rake test` builds an extension and downloads a model for testing. After that, you add tests in `tests` directory and modify `ext/ruby_whisper.cpp`.
|
||||||
|
|
||||||
|
If something seems wrong on build, running `rake clean` solves some cases.
|
||||||
|
|
||||||
License
|
License
|
||||||
-------
|
-------
|
||||||
|
@ -18,9 +18,11 @@ EXTSOURCES.each do |src|
|
|||||||
end
|
end
|
||||||
|
|
||||||
CLEAN.include SOURCES
|
CLEAN.include SOURCES
|
||||||
CLEAN.include FileList["ext/*.o", "ext/*.metal", "ext/whisper.{so,bundle,dll}"]
|
CLEAN.include FileList["ext/**/*.o", "ext/**/*.metal", "ext/**/*.tmp", "ext/whisper.{so,bundle,dll}"]
|
||||||
|
|
||||||
task build: ["ext/Makefile", "ext/ruby_whisper.h", "ext/ruby_whisper.cpp", "whispercpp.gemspec"]
|
SRC = FileList["ext/*.{c,cpp,h}"]
|
||||||
|
|
||||||
|
task build: SOURCES
|
||||||
|
|
||||||
directory "pkg"
|
directory "pkg"
|
||||||
CLOBBER.include "pkg"
|
CLOBBER.include "pkg"
|
||||||
@ -29,14 +31,14 @@ LIB_NAME = "whisper".ext(RbConfig::CONFIG["DLEXT"])
|
|||||||
SO_FILE = File.join("ext", LIB_NAME)
|
SO_FILE = File.join("ext", LIB_NAME)
|
||||||
LIB_FILE = File.join("lib", LIB_NAME)
|
LIB_FILE = File.join("lib", LIB_NAME)
|
||||||
|
|
||||||
file "ext/Makefile" => ["ext/extconf.rb", "ext/ruby_whisper.h", "ext/ruby_whisper.cpp"] + SOURCES do |t|
|
file "ext/Makefile" => SRC + ["ext/extconf.rb"] + SOURCES do |t|
|
||||||
Dir.chdir "ext" do
|
chdir "ext" do
|
||||||
ruby "extconf.rb"
|
ruby "extconf.rb"
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
file SO_FILE => "ext/Makefile" do |t|
|
file SO_FILE => "ext/Makefile" do |t|
|
||||||
Dir.chdir "ext" do
|
chdir "ext" do
|
||||||
sh "make"
|
sh "make"
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
@ -54,7 +56,7 @@ end
|
|||||||
|
|
||||||
TEST_MEMORY_VIEW = "tests/jfk_reader/jfk_reader.#{RbConfig::CONFIG['DLEXT']}"
|
TEST_MEMORY_VIEW = "tests/jfk_reader/jfk_reader.#{RbConfig::CONFIG['DLEXT']}"
|
||||||
file TEST_MEMORY_VIEW => "tests/jfk_reader/jfk_reader.c" do |t|
|
file TEST_MEMORY_VIEW => "tests/jfk_reader/jfk_reader.c" do |t|
|
||||||
Dir.chdir "tests/jfk_reader" do
|
chdir "tests/jfk_reader" do
|
||||||
ruby "extconf.rb"
|
ruby "extconf.rb"
|
||||||
sh "make"
|
sh "make"
|
||||||
end
|
end
|
||||||
|
12
bindings/ruby/ext/.gitignore
vendored
12
bindings/ruby/ext/.gitignore
vendored
@ -4,10 +4,8 @@ whisper.bundle
|
|||||||
whisper.dll
|
whisper.dll
|
||||||
scripts/get-flags.mk
|
scripts/get-flags.mk
|
||||||
*.o
|
*.o
|
||||||
*.c
|
/*/**/*.c
|
||||||
*.cpp
|
/*/**/*.cpp
|
||||||
*.h
|
/*/**/*.h
|
||||||
*.m
|
/*/**/*.m
|
||||||
*.metal
|
/*/**/*.metal
|
||||||
!ruby_whisper.cpp
|
|
||||||
!ruby_whisper.h
|
|
||||||
|
@ -35,7 +35,7 @@ if $GGML_METAL
|
|||||||
$GGML_METAL_EMBED_LIBRARY = true
|
$GGML_METAL_EMBED_LIBRARY = true
|
||||||
end
|
end
|
||||||
|
|
||||||
$MK_CPPFLAGS = '-Iggml/include -Iggml/src -Iggml/src/ggml-cpu -Iinclude -Isrc -Iexamples'
|
$MK_CPPFLAGS = '-Iggml/include -Iggml/src -Iggml/src/ggml-cpu -Iinclude -Isrc -Iexamples -DGGML_USE_CPU'
|
||||||
$MK_CFLAGS = '-std=c11 -fPIC'
|
$MK_CFLAGS = '-std=c11 -fPIC'
|
||||||
$MK_CXXFLAGS = '-std=c++17 -fPIC'
|
$MK_CXXFLAGS = '-std=c++17 -fPIC'
|
||||||
$MK_NVCCFLAGS = '-std=c++17'
|
$MK_NVCCFLAGS = '-std=c++17'
|
||||||
@ -171,10 +171,19 @@ $OBJ_GGML <<
|
|||||||
'ggml/src/ggml-cpu/ggml-cpu-traits.o'
|
'ggml/src/ggml-cpu/ggml-cpu-traits.o'
|
||||||
|
|
||||||
$OBJ_WHISPER <<
|
$OBJ_WHISPER <<
|
||||||
'src/whisper.o'
|
'src/whisper.o' <<
|
||||||
|
'examples/common.o' <<
|
||||||
|
'examples/common-whisper.o'
|
||||||
|
|
||||||
$objs = $OBJ_GGML + $OBJ_WHISPER + $OBJ_COMMON + $OBJ_SDL
|
$objs = $OBJ_GGML + $OBJ_WHISPER + $OBJ_COMMON + $OBJ_SDL
|
||||||
$objs << "ruby_whisper.o"
|
$objs <<
|
||||||
|
"ruby_whisper.o" <<
|
||||||
|
"ruby_whisper_context.o" <<
|
||||||
|
"ruby_whisper_transcribe.o" <<
|
||||||
|
"ruby_whisper_params.o" <<
|
||||||
|
"ruby_whisper_error.o" <<
|
||||||
|
"ruby_whisper_segment.o" <<
|
||||||
|
"ruby_whisper_model.o"
|
||||||
|
|
||||||
$CPPFLAGS = "#{$MK_CPPFLAGS} #{$CPPFLAGS}"
|
$CPPFLAGS = "#{$MK_CPPFLAGS} #{$CPPFLAGS}"
|
||||||
$CFLAGS = "#{$CPPFLAGS} #{$MK_CFLAGS} #{$GF_CFLAGS} #{$CFLAGS}"
|
$CFLAGS = "#{$CPPFLAGS} #{$MK_CFLAGS} #{$GF_CFLAGS} #{$CFLAGS}"
|
||||||
|
164
bindings/ruby/ext/ruby_whisper.c
Normal file
164
bindings/ruby/ext/ruby_whisper.c
Normal file
@ -0,0 +1,164 @@
|
|||||||
|
#include <ruby.h>
|
||||||
|
#include <ruby/memory_view.h>
|
||||||
|
#include "ruby_whisper.h"
|
||||||
|
|
||||||
|
VALUE mWhisper;
|
||||||
|
VALUE cContext;
|
||||||
|
VALUE cParams;
|
||||||
|
VALUE eError;
|
||||||
|
|
||||||
|
VALUE cSegment;
|
||||||
|
VALUE cModel;
|
||||||
|
|
||||||
|
ID id_to_s;
|
||||||
|
ID id_call;
|
||||||
|
ID id___method__;
|
||||||
|
ID id_to_enum;
|
||||||
|
ID id_length;
|
||||||
|
ID id_next;
|
||||||
|
ID id_new;
|
||||||
|
ID id_to_path;
|
||||||
|
ID id_URI;
|
||||||
|
ID id_pre_converted_models;
|
||||||
|
|
||||||
|
static bool is_log_callback_finalized = false;
|
||||||
|
|
||||||
|
// High level API
|
||||||
|
extern VALUE ruby_whisper_segment_allocate(VALUE klass);
|
||||||
|
|
||||||
|
extern void init_ruby_whisper_context(VALUE *mWhisper);
|
||||||
|
extern void init_ruby_whisper_params(VALUE *mWhisper);
|
||||||
|
extern void init_ruby_whisper_error(VALUE *mWhisper);
|
||||||
|
extern void init_ruby_whisper_segment(VALUE *mWhisper, VALUE *cSegment);
|
||||||
|
extern void init_ruby_whisper_model(VALUE *mWhisper);
|
||||||
|
extern void register_callbacks(ruby_whisper_params *rwp, VALUE *context);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* lang_max_id -> Integer
|
||||||
|
*/
|
||||||
|
static VALUE ruby_whisper_s_lang_max_id(VALUE self) {
|
||||||
|
return INT2NUM(whisper_lang_max_id());
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* lang_id(lang_name) -> Integer
|
||||||
|
*/
|
||||||
|
static VALUE ruby_whisper_s_lang_id(VALUE self, VALUE lang) {
|
||||||
|
const char * lang_str = StringValueCStr(lang);
|
||||||
|
const int id = whisper_lang_id(lang_str);
|
||||||
|
if (-1 == id) {
|
||||||
|
rb_raise(rb_eArgError, "language not found: %s", lang_str);
|
||||||
|
}
|
||||||
|
return INT2NUM(id);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* lang_str(lang_id) -> String
|
||||||
|
*/
|
||||||
|
static VALUE ruby_whisper_s_lang_str(VALUE self, VALUE id) {
|
||||||
|
const int lang_id = NUM2INT(id);
|
||||||
|
const char * str = whisper_lang_str(lang_id);
|
||||||
|
if (NULL == str) {
|
||||||
|
rb_raise(rb_eIndexError, "id %d outside of language id", lang_id);
|
||||||
|
}
|
||||||
|
return rb_str_new2(str);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* lang_str(lang_id) -> String
|
||||||
|
*/
|
||||||
|
static VALUE ruby_whisper_s_lang_str_full(VALUE self, VALUE id) {
|
||||||
|
const int lang_id = NUM2INT(id);
|
||||||
|
const char * str_full = whisper_lang_str_full(lang_id);
|
||||||
|
if (NULL == str_full) {
|
||||||
|
rb_raise(rb_eIndexError, "id %d outside of language id", lang_id);
|
||||||
|
}
|
||||||
|
return rb_str_new2(str_full);
|
||||||
|
}
|
||||||
|
|
||||||
|
static VALUE ruby_whisper_s_finalize_log_callback(VALUE self, VALUE id) {
|
||||||
|
is_log_callback_finalized = true;
|
||||||
|
return Qnil;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
ruby_whisper_log_callback(enum ggml_log_level level, const char * buffer, void * user_data) {
|
||||||
|
if (is_log_callback_finalized) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
VALUE log_callback = rb_iv_get(mWhisper, "log_callback");
|
||||||
|
VALUE udata = rb_iv_get(mWhisper, "user_data");
|
||||||
|
rb_funcall(log_callback, id_call, 3, INT2NUM(level), rb_str_new2(buffer), udata);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* log_set ->(level, buffer, user_data) { ... }, user_data -> nil
|
||||||
|
*/
|
||||||
|
static VALUE ruby_whisper_s_log_set(VALUE self, VALUE log_callback, VALUE user_data) {
|
||||||
|
VALUE old_callback = rb_iv_get(self, "log_callback");
|
||||||
|
if (!NIL_P(old_callback)) {
|
||||||
|
rb_undefine_finalizer(old_callback);
|
||||||
|
}
|
||||||
|
|
||||||
|
rb_iv_set(self, "log_callback", log_callback);
|
||||||
|
rb_iv_set(self, "user_data", user_data);
|
||||||
|
|
||||||
|
VALUE finalize_log_callback = rb_funcall(mWhisper, rb_intern("method"), 1, rb_str_new2("finalize_log_callback"));
|
||||||
|
rb_define_finalizer(log_callback, finalize_log_callback);
|
||||||
|
|
||||||
|
whisper_log_set(ruby_whisper_log_callback, NULL);
|
||||||
|
|
||||||
|
return Qnil;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void rb_whisper_model_mark(ruby_whisper_model *rwm) {
|
||||||
|
rb_gc_mark(rwm->context);
|
||||||
|
}
|
||||||
|
|
||||||
|
static VALUE ruby_whisper_model_allocate(VALUE klass) {
|
||||||
|
ruby_whisper_model *rwm;
|
||||||
|
rwm = ALLOC(ruby_whisper_model);
|
||||||
|
return Data_Wrap_Struct(klass, rb_whisper_model_mark, RUBY_DEFAULT_FREE, rwm);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extension entry point: interns commonly used method-name IDs, defines the
// Whisper module with its log-level constants and singleton methods, then
// initializes the classes defined in the other translation units.
void Init_whisper() {
  // Intern frequently used method names once, at load time.
  id_to_s = rb_intern("to_s");
  id_call = rb_intern("call");
  id___method__ = rb_intern("__method__");
  id_to_enum = rb_intern("to_enum");
  id_length = rb_intern("length");
  id_next = rb_intern("next");
  id_new = rb_intern("new");
  id_to_path = rb_intern("to_path");
  id_URI = rb_intern("URI");
  id_pre_converted_models = rb_intern("pre_converted_models");

  mWhisper = rb_define_module("Whisper");

  // Mirror ggml's log levels as Ruby constants for use with Whisper.log_set.
  rb_define_const(mWhisper, "LOG_LEVEL_NONE", INT2NUM(GGML_LOG_LEVEL_NONE));
  rb_define_const(mWhisper, "LOG_LEVEL_INFO", INT2NUM(GGML_LOG_LEVEL_INFO));
  rb_define_const(mWhisper, "LOG_LEVEL_WARN", INT2NUM(GGML_LOG_LEVEL_WARN));
  rb_define_const(mWhisper, "LOG_LEVEL_ERROR", INT2NUM(GGML_LOG_LEVEL_ERROR));
  rb_define_const(mWhisper, "LOG_LEVEL_DEBUG", INT2NUM(GGML_LOG_LEVEL_DEBUG));
  rb_define_const(mWhisper, "LOG_LEVEL_CONT", INT2NUM(GGML_LOG_LEVEL_CONT));

  rb_define_singleton_method(mWhisper, "lang_max_id", ruby_whisper_s_lang_max_id, 0);
  rb_define_singleton_method(mWhisper, "lang_id", ruby_whisper_s_lang_id, 1);
  rb_define_singleton_method(mWhisper, "lang_str", ruby_whisper_s_lang_str, 1);
  rb_define_singleton_method(mWhisper, "lang_str_full", ruby_whisper_s_lang_str_full, 1);
  rb_define_singleton_method(mWhisper, "log_set", ruby_whisper_s_log_set, 2);
  // Private: only meant to be invoked by the GC finalizer installed in log_set.
  rb_define_private_method(rb_singleton_class(mWhisper), "finalize_log_callback", ruby_whisper_s_finalize_log_callback, 1);

  // Classes defined in the other translation units (Context must come before
  // Segment, which receives &cContext).
  init_ruby_whisper_context(&mWhisper);
  init_ruby_whisper_params(&mWhisper);
  init_ruby_whisper_error(&mWhisper);
  init_ruby_whisper_segment(&mWhisper, &cContext);
  init_ruby_whisper_model(&mWhisper);

  rb_require("whisper/model/uri");
}
|
File diff suppressed because it is too large
Load Diff
@ -1,5 +1,5 @@
|
|||||||
#ifndef __RUBY_WHISPER_H
|
#ifndef RUBY_WHISPER_H
|
||||||
#define __RUBY_WHISPER_H
|
#define RUBY_WHISPER_H
|
||||||
|
|
||||||
#include "whisper.h"
|
#include "whisper.h"
|
||||||
|
|
||||||
@ -22,4 +22,13 @@ typedef struct {
|
|||||||
ruby_whisper_callback_container *abort_callback_container;
|
ruby_whisper_callback_container *abort_callback_container;
|
||||||
} ruby_whisper_params;
|
} ruby_whisper_params;
|
||||||
|
|
||||||
|
// A view into one decoded segment: holds the owning Whisper::Context object
// plus the segment's index within that context's transcription results.
typedef struct {
  VALUE context; // the Whisper::Context Ruby object this segment belongs to
  int index;     // segment index within the context's results
} ruby_whisper_segment;

// Wrapper for Whisper::Model: keeps only a reference back to the owning
// Whisper::Context; model queries go through that context.
typedef struct {
  VALUE context; // the Whisper::Context Ruby object that owns the model
} ruby_whisper_model;
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
613
bindings/ruby/ext/ruby_whisper_context.c
Normal file
613
bindings/ruby/ext/ruby_whisper_context.c
Normal file
@ -0,0 +1,613 @@
|
|||||||
|
#include <ruby.h>
|
||||||
|
#include <ruby/memory_view.h>
|
||||||
|
#include "ruby_whisper.h"
|
||||||
|
|
||||||
|
extern ID id_to_s;
|
||||||
|
extern ID id___method__;
|
||||||
|
extern ID id_to_enum;
|
||||||
|
extern ID id_length;
|
||||||
|
extern ID id_next;
|
||||||
|
extern ID id_new;
|
||||||
|
extern ID id_to_path;
|
||||||
|
extern ID id_URI;
|
||||||
|
extern ID id_pre_converted_models;
|
||||||
|
|
||||||
|
extern VALUE cContext;
|
||||||
|
extern VALUE eError;
|
||||||
|
extern VALUE cModel;
|
||||||
|
|
||||||
|
extern VALUE ruby_whisper_transcribe(int argc, VALUE *argv, VALUE self);
|
||||||
|
extern VALUE rb_whisper_model_initialize(VALUE context);
|
||||||
|
extern VALUE rb_whisper_segment_initialize(VALUE context, int index);
|
||||||
|
extern void register_callbacks(ruby_whisper_params *rwp, VALUE *context);
|
||||||
|
|
||||||
|
static void
|
||||||
|
ruby_whisper_free(ruby_whisper *rw)
|
||||||
|
{
|
||||||
|
if (rw->context) {
|
||||||
|
whisper_free(rw->context);
|
||||||
|
rw->context = NULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GC mark function registered with Data_Wrap_Struct for Whisper::Context.
// ruby_whisper currently stores no Ruby VALUEs, so there is nothing to mark;
// kept so references added later have an obvious home.
void
rb_whisper_mark(ruby_whisper *rw)
{
  // call rb_gc_mark on any ruby references in rw
}
|
||||||
|
|
||||||
|
void
|
||||||
|
rb_whisper_free(ruby_whisper *rw)
|
||||||
|
{
|
||||||
|
ruby_whisper_free(rw);
|
||||||
|
free(rw);
|
||||||
|
}
|
||||||
|
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_allocate(VALUE klass)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
rw = ALLOC(ruby_whisper);
|
||||||
|
rw->context = NULL;
|
||||||
|
return Data_Wrap_Struct(klass, rb_whisper_mark, rb_whisper_free, rw);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
 * call-seq:
 *   new("base.en") -> Whisper::Context
 *   new("path/to/model.bin") -> Whisper::Context
 *   new(Whisper::Model::URI.new("https://example.net/uri/of/model.bin")) -> Whisper::Context
 */
static VALUE
ruby_whisper_initialize(int argc, VALUE *argv, VALUE self)
{
  ruby_whisper *rw;
  VALUE whisper_model_file_path;

  // TODO: we can support init from buffer here too maybe another ruby object to expose
  rb_scan_args(argc, argv, "01", &whisper_model_file_path);
  Data_Get_Struct(self, ruby_whisper, rw);

  // 1. Short model names ("base.en", ...) are resolved through the table of
  //    pre-converted models to a downloadable Model::URI.
  VALUE pre_converted_models = rb_funcall(cModel, id_pre_converted_models, 0);
  VALUE pre_converted_model = rb_hash_aref(pre_converted_models, whisper_model_file_path);
  if (!NIL_P(pre_converted_model)) {
    whisper_model_file_path = pre_converted_model;
  }
  // 2. A plain http(s):// string is wrapped in Model::URI as well.
  if (TYPE(whisper_model_file_path) == T_STRING) {
    const char * whisper_model_file_path_str = StringValueCStr(whisper_model_file_path);
    if (strncmp("http://", whisper_model_file_path_str, 7) == 0 || strncmp("https://", whisper_model_file_path_str, 8) == 0) {
      VALUE uri_class = rb_const_get(cModel, id_URI);
      whisper_model_file_path = rb_class_new_instance(1, &whisper_model_file_path, uri_class);
    }
  }
  // 3. Likewise for URI::HTTP(S) objects passed in directly.
  if (rb_obj_is_kind_of(whisper_model_file_path, rb_path2class("URI::HTTP"))) {
    VALUE uri_class = rb_const_get(cModel, id_URI);
    whisper_model_file_path = rb_class_new_instance(1, &whisper_model_file_path, uri_class);
  }
  // 4. Anything path-like (including Model::URI, which presumably fetches the
  //    model in #to_path — confirm against model/uri.rb) is reduced to a
  //    filesystem path.
  if (rb_respond_to(whisper_model_file_path, id_to_path)) {
    whisper_model_file_path = rb_funcall(whisper_model_file_path, id_to_path, 0);
  }
  if (!rb_respond_to(whisper_model_file_path, id_to_s)) {
    rb_raise(rb_eRuntimeError, "Expected file path to model to initialize Whisper::Context");
  }
  rw->context = whisper_init_from_file_with_params(StringValueCStr(whisper_model_file_path), whisper_context_default_params());
  if (rw->context == NULL) {
    rb_raise(rb_eRuntimeError, "error: failed to initialize whisper context");
  }
  return self;
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* model_n_vocab -> Integer
|
||||||
|
*/
|
||||||
|
VALUE ruby_whisper_model_n_vocab(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_vocab(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* model_n_audio_ctx -> Integer
|
||||||
|
*/
|
||||||
|
VALUE ruby_whisper_model_n_audio_ctx(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_audio_ctx(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* model_n_audio_state -> Integer
|
||||||
|
*/
|
||||||
|
VALUE ruby_whisper_model_n_audio_state(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_audio_state(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* model_n_audio_head -> Integer
|
||||||
|
*/
|
||||||
|
VALUE ruby_whisper_model_n_audio_head(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_audio_head(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* model_n_audio_layer -> Integer
|
||||||
|
*/
|
||||||
|
VALUE ruby_whisper_model_n_audio_layer(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_audio_layer(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* model_n_text_ctx -> Integer
|
||||||
|
*/
|
||||||
|
VALUE ruby_whisper_model_n_text_ctx(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_text_ctx(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* model_n_text_state -> Integer
|
||||||
|
*/
|
||||||
|
VALUE ruby_whisper_model_n_text_state(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_text_state(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* model_n_text_head -> Integer
|
||||||
|
*/
|
||||||
|
VALUE ruby_whisper_model_n_text_head(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_text_head(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* model_n_text_layer -> Integer
|
||||||
|
*/
|
||||||
|
VALUE ruby_whisper_model_n_text_layer(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_text_layer(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* model_n_mels -> Integer
|
||||||
|
*/
|
||||||
|
VALUE ruby_whisper_model_n_mels(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_mels(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* model_ftype -> Integer
|
||||||
|
*/
|
||||||
|
VALUE ruby_whisper_model_ftype(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_ftype(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* model_type -> String
|
||||||
|
*/
|
||||||
|
VALUE ruby_whisper_model_type(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
return rb_str_new2(whisper_model_type_readable(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Run the entire model: PCM -> log mel spectrogram -> encoder -> decoder -> text
|
||||||
|
* Not thread safe for same context
|
||||||
|
* Uses the specified decoding strategy to obtain the text.
|
||||||
|
*
|
||||||
|
* call-seq:
|
||||||
|
* full(params, samples, n_samples) -> nil
|
||||||
|
* full(params, samples) -> nil
|
||||||
|
*
|
||||||
|
* The second argument +samples+ must be an array of samples, respond to :length, or be a MemoryView of an array of float. It must be 32 bit float PCM audio data.
|
||||||
|
*/
|
||||||
|
VALUE ruby_whisper_full(int argc, VALUE *argv, VALUE self)
|
||||||
|
{
|
||||||
|
if (argc < 2 || argc > 3) {
|
||||||
|
rb_raise(rb_eArgError, "wrong number of arguments (given %d, expected 2..3)", argc);
|
||||||
|
}
|
||||||
|
|
||||||
|
ruby_whisper *rw;
|
||||||
|
ruby_whisper_params *rwp;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
VALUE params = argv[0];
|
||||||
|
Data_Get_Struct(params, ruby_whisper_params, rwp);
|
||||||
|
VALUE samples = argv[1];
|
||||||
|
int n_samples;
|
||||||
|
rb_memory_view_t view;
|
||||||
|
const bool memory_view_available_p = rb_memory_view_available_p(samples);
|
||||||
|
if (argc == 3) {
|
||||||
|
n_samples = NUM2INT(argv[2]);
|
||||||
|
if (TYPE(samples) == T_ARRAY) {
|
||||||
|
if (RARRAY_LEN(samples) < n_samples) {
|
||||||
|
rb_raise(rb_eArgError, "samples length %ld is less than n_samples %d", RARRAY_LEN(samples), n_samples);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Should check when samples.respond_to?(:length)?
|
||||||
|
} else {
|
||||||
|
if (TYPE(samples) == T_ARRAY) {
|
||||||
|
n_samples = RARRAY_LEN(samples);
|
||||||
|
} else if (memory_view_available_p) {
|
||||||
|
if (!rb_memory_view_get(samples, &view, RUBY_MEMORY_VIEW_SIMPLE)) {
|
||||||
|
view.obj = Qnil;
|
||||||
|
rb_raise(rb_eArgError, "unable to get a memory view");
|
||||||
|
}
|
||||||
|
n_samples = view.byte_size / view.item_size;
|
||||||
|
} else if (rb_respond_to(samples, id_length)) {
|
||||||
|
n_samples = NUM2INT(rb_funcall(samples, id_length, 0));
|
||||||
|
} else {
|
||||||
|
rb_raise(rb_eArgError, "samples must respond to :length or be a MemoryView of an array of flaot when n_samples is not given");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
float * c_samples = (float *)malloc(n_samples * sizeof(float));
|
||||||
|
if (memory_view_available_p) {
|
||||||
|
c_samples = (float *)view.data;
|
||||||
|
} else {
|
||||||
|
if (TYPE(samples) == T_ARRAY) {
|
||||||
|
for (int i = 0; i < n_samples; i++) {
|
||||||
|
c_samples[i] = RFLOAT_VALUE(rb_ary_entry(samples, i));
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// TODO: use rb_block_call
|
||||||
|
VALUE iter = rb_funcall(samples, id_to_enum, 1, rb_str_new2("each"));
|
||||||
|
for (int i = 0; i < n_samples; i++) {
|
||||||
|
// TODO: check if iter is exhausted and raise ArgumentError appropriately
|
||||||
|
VALUE sample = rb_funcall(iter, id_next, 0);
|
||||||
|
c_samples[i] = RFLOAT_VALUE(sample);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
register_callbacks(rwp, &self);
|
||||||
|
const int result = whisper_full(rw->context, rwp->params, c_samples, n_samples);
|
||||||
|
if (0 == result) {
|
||||||
|
return self;
|
||||||
|
} else {
|
||||||
|
rb_exc_raise(rb_funcall(eError, id_new, 1, result));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Split the input audio in chunks and process each chunk separately using whisper_full_with_state()
|
||||||
|
* Result is stored in the default state of the context
|
||||||
|
* Not thread safe if executed in parallel on the same context.
|
||||||
|
* It seems this approach can offer some speedup in some cases.
|
||||||
|
* However, the transcription accuracy can be worse at the beginning and end of each chunk.
|
||||||
|
*
|
||||||
|
* call-seq:
|
||||||
|
* full_parallel(params, samples) -> nil
|
||||||
|
* full_parallel(params, samples, n_samples) -> nil
|
||||||
|
* full_parallel(params, samples, n_samples, n_processors) -> nil
|
||||||
|
* full_parallel(params, samples, nil, n_processors) -> nil
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_full_parallel(int argc, VALUE *argv,VALUE self)
|
||||||
|
{
|
||||||
|
if (argc < 2 || argc > 4) {
|
||||||
|
rb_raise(rb_eArgError, "wrong number of arguments (given %d, expected 2..3)", argc);
|
||||||
|
}
|
||||||
|
|
||||||
|
ruby_whisper *rw;
|
||||||
|
ruby_whisper_params *rwp;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
VALUE params = argv[0];
|
||||||
|
Data_Get_Struct(params, ruby_whisper_params, rwp);
|
||||||
|
VALUE samples = argv[1];
|
||||||
|
int n_samples;
|
||||||
|
int n_processors;
|
||||||
|
rb_memory_view_t view;
|
||||||
|
const bool memory_view_available_p = rb_memory_view_available_p(samples);
|
||||||
|
switch (argc) {
|
||||||
|
case 2:
|
||||||
|
n_processors = 1;
|
||||||
|
break;
|
||||||
|
case 3:
|
||||||
|
n_processors = 1;
|
||||||
|
break;
|
||||||
|
case 4:
|
||||||
|
n_processors = NUM2INT(argv[3]);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
if (argc >= 3 && !NIL_P(argv[2])) {
|
||||||
|
n_samples = NUM2INT(argv[2]);
|
||||||
|
if (TYPE(samples) == T_ARRAY) {
|
||||||
|
if (RARRAY_LEN(samples) < n_samples) {
|
||||||
|
rb_raise(rb_eArgError, "samples length %ld is less than n_samples %d", RARRAY_LEN(samples), n_samples);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Should check when samples.respond_to?(:length)?
|
||||||
|
} else if (memory_view_available_p) {
|
||||||
|
if (!rb_memory_view_get(samples, &view, RUBY_MEMORY_VIEW_SIMPLE)) {
|
||||||
|
view.obj = Qnil;
|
||||||
|
rb_raise(rb_eArgError, "unable to get a memory view");
|
||||||
|
}
|
||||||
|
n_samples = view.byte_size / view.item_size;
|
||||||
|
} else {
|
||||||
|
if (TYPE(samples) == T_ARRAY) {
|
||||||
|
n_samples = RARRAY_LEN(samples);
|
||||||
|
} else if (rb_respond_to(samples, id_length)) {
|
||||||
|
n_samples = NUM2INT(rb_funcall(samples, id_length, 0));
|
||||||
|
} else {
|
||||||
|
rb_raise(rb_eArgError, "samples must respond to :length or be a MemoryView of an array of flaot when n_samples is not given");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
float * c_samples = (float *)malloc(n_samples * sizeof(float));
|
||||||
|
if (memory_view_available_p) {
|
||||||
|
c_samples = (float *)view.data;
|
||||||
|
} else {
|
||||||
|
if (TYPE(samples) == T_ARRAY) {
|
||||||
|
for (int i = 0; i < n_samples; i++) {
|
||||||
|
c_samples[i] = RFLOAT_VALUE(rb_ary_entry(samples, i));
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// FIXME: use rb_block_call
|
||||||
|
VALUE iter = rb_funcall(samples, id_to_enum, 1, rb_str_new2("each"));
|
||||||
|
for (int i = 0; i < n_samples; i++) {
|
||||||
|
// TODO: check if iter is exhausted and raise ArgumentError
|
||||||
|
VALUE sample = rb_funcall(iter, id_next, 0);
|
||||||
|
c_samples[i] = RFLOAT_VALUE(sample);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
register_callbacks(rwp, &self);
|
||||||
|
const int result = whisper_full_parallel(rw->context, rwp->params, c_samples, n_samples, n_processors);
|
||||||
|
if (0 == result) {
|
||||||
|
return self;
|
||||||
|
} else {
|
||||||
|
rb_exc_raise(rb_funcall(eError, id_new, 1, result));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Number of segments.
|
||||||
|
*
|
||||||
|
* call-seq:
|
||||||
|
* full_n_segments -> Integer
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_full_n_segments(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_full_n_segments(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Language ID, which can be converted to string by Whisper.lang_str and Whisper.lang_str_full.
|
||||||
|
*
|
||||||
|
* call-seq:
|
||||||
|
* full_lang_id -> Integer
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_full_lang_id(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_full_lang_id(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
static int ruby_whisper_full_check_segment_index(const ruby_whisper * rw, const VALUE i_segment)
|
||||||
|
{
|
||||||
|
const int c_i_segment = NUM2INT(i_segment);
|
||||||
|
if (c_i_segment < 0 || c_i_segment >= whisper_full_n_segments(rw->context)) {
|
||||||
|
rb_raise(rb_eIndexError, "segment index %d out of range", c_i_segment);
|
||||||
|
}
|
||||||
|
return c_i_segment;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Start time of a segment indexed by +segment_index+ in centiseconds (10 times milliseconds).
|
||||||
|
*
|
||||||
|
* full_get_segment_t0(3) # => 1668 (16680 ms)
|
||||||
|
*
|
||||||
|
* call-seq:
|
||||||
|
* full_get_segment_t0(segment_index) -> Integer
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_full_get_segment_t0(VALUE self, VALUE i_segment)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
const int c_i_segment = ruby_whisper_full_check_segment_index(rw, i_segment);
|
||||||
|
const int64_t t0 = whisper_full_get_segment_t0(rw->context, c_i_segment);
|
||||||
|
return INT2NUM(t0);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* End time of a segment indexed by +segment_index+ in centiseconds (10 times milliseconds).
|
||||||
|
*
|
||||||
|
* full_get_segment_t1(3) # => 1668 (16680 ms)
|
||||||
|
*
|
||||||
|
* call-seq:
|
||||||
|
* full_get_segment_t1(segment_index) -> Integer
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_full_get_segment_t1(VALUE self, VALUE i_segment)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
const int c_i_segment = ruby_whisper_full_check_segment_index(rw, i_segment);
|
||||||
|
const int64_t t1 = whisper_full_get_segment_t1(rw->context, c_i_segment);
|
||||||
|
return INT2NUM(t1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Whether the next segment indexed by +segment_index+ is predicated as a speaker turn.
|
||||||
|
*
|
||||||
|
* full_get_segment_speacker_turn_next(3) # => true
|
||||||
|
*
|
||||||
|
* call-seq:
|
||||||
|
* full_get_segment_speacker_turn_next(segment_index) -> bool
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_full_get_segment_speaker_turn_next(VALUE self, VALUE i_segment)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
const int c_i_segment = ruby_whisper_full_check_segment_index(rw, i_segment);
|
||||||
|
const bool speaker_turn_next = whisper_full_get_segment_speaker_turn_next(rw->context, c_i_segment);
|
||||||
|
return speaker_turn_next ? Qtrue : Qfalse;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Text of a segment indexed by +segment_index+.
|
||||||
|
*
|
||||||
|
* full_get_segment_text(3) # => "ask not what your country can do for you, ..."
|
||||||
|
*
|
||||||
|
* call-seq:
|
||||||
|
* full_get_segment_text(segment_index) -> String
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_full_get_segment_text(VALUE self, VALUE i_segment)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
const int c_i_segment = ruby_whisper_full_check_segment_index(rw, i_segment);
|
||||||
|
const char * text = whisper_full_get_segment_text(rw->context, c_i_segment);
|
||||||
|
return rb_str_new2(text);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* full_get_segment_no_speech_prob(segment_index) -> Float
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_full_get_segment_no_speech_prob(VALUE self, VALUE i_segment)
|
||||||
|
{
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
const int c_i_segment = ruby_whisper_full_check_segment_index(rw, i_segment);
|
||||||
|
const float no_speech_prob = whisper_full_get_segment_no_speech_prob(rw->context, c_i_segment);
|
||||||
|
return DBL2NUM(no_speech_prob);
|
||||||
|
}
|
||||||
|
|
||||||
|
// High level API
|
||||||
|
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_full_get_segment(VALUE self, VALUE i_segment)
|
||||||
|
{
|
||||||
|
return rb_whisper_segment_initialize(self, NUM2INT(i_segment));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Yields each Whisper::Segment:
|
||||||
|
*
|
||||||
|
* whisper.transcribe("path/to/audio.wav", params)
|
||||||
|
* whisper.each_segment do |segment|
|
||||||
|
* puts segment.text
|
||||||
|
* end
|
||||||
|
*
|
||||||
|
* Returns an Enumerator if no block given:
|
||||||
|
*
|
||||||
|
* whisper.transcribe("path/to/audio.wav", params)
|
||||||
|
* enum = whisper.each_segment
|
||||||
|
* enum.to_a # => [#<Whisper::Segment>, ...]
|
||||||
|
*
|
||||||
|
* call-seq:
|
||||||
|
* each_segment {|segment| ... }
|
||||||
|
* each_segment -> Enumerator
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_each_segment(VALUE self)
|
||||||
|
{
|
||||||
|
if (!rb_block_given_p()) {
|
||||||
|
const VALUE method_name = rb_funcall(self, id___method__, 0);
|
||||||
|
return rb_funcall(self, id_to_enum, 1, method_name);
|
||||||
|
}
|
||||||
|
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
|
||||||
|
const int n_segments = whisper_full_n_segments(rw->context);
|
||||||
|
for (int i = 0; i < n_segments; ++i) {
|
||||||
|
rb_yield(rb_whisper_segment_initialize(self, i));
|
||||||
|
}
|
||||||
|
|
||||||
|
return self;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
 * Returns a Whisper::Model wrapper backed by this context.
 *
 * call-seq:
 *   model -> Whisper::Model
 */
static VALUE
ruby_whisper_get_model(VALUE self)
{
  return rb_whisper_model_initialize(self);
}
|
||||||
|
|
||||||
|
// Define Whisper::Context and register all of its methods.
void
init_ruby_whisper_context(VALUE *mWhisper)
{
  cContext = rb_define_class_under(*mWhisper, "Context", rb_cObject);

  rb_define_alloc_func(cContext, ruby_whisper_allocate);
  rb_define_method(cContext, "initialize", ruby_whisper_initialize, -1);

  rb_define_method(cContext, "transcribe", ruby_whisper_transcribe, -1);
  // Model metadata accessors.
  rb_define_method(cContext, "model_n_vocab", ruby_whisper_model_n_vocab, 0);
  rb_define_method(cContext, "model_n_audio_ctx", ruby_whisper_model_n_audio_ctx, 0);
  rb_define_method(cContext, "model_n_audio_state", ruby_whisper_model_n_audio_state, 0);
  rb_define_method(cContext, "model_n_audio_head", ruby_whisper_model_n_audio_head, 0);
  rb_define_method(cContext, "model_n_audio_layer", ruby_whisper_model_n_audio_layer, 0);
  rb_define_method(cContext, "model_n_text_ctx", ruby_whisper_model_n_text_ctx, 0);
  rb_define_method(cContext, "model_n_text_state", ruby_whisper_model_n_text_state, 0);
  rb_define_method(cContext, "model_n_text_head", ruby_whisper_model_n_text_head, 0);
  rb_define_method(cContext, "model_n_text_layer", ruby_whisper_model_n_text_layer, 0);
  rb_define_method(cContext, "model_n_mels", ruby_whisper_model_n_mels, 0);
  rb_define_method(cContext, "model_ftype", ruby_whisper_model_ftype, 0);
  rb_define_method(cContext, "model_type", ruby_whisper_model_type, 0);
  // Low-level whisper_full wrappers.
  rb_define_method(cContext, "full_n_segments", ruby_whisper_full_n_segments, 0);
  rb_define_method(cContext, "full_lang_id", ruby_whisper_full_lang_id, 0);
  rb_define_method(cContext, "full_get_segment_t0", ruby_whisper_full_get_segment_t0, 1);
  rb_define_method(cContext, "full_get_segment_t1", ruby_whisper_full_get_segment_t1, 1);
  rb_define_method(cContext, "full_get_segment_speaker_turn_next", ruby_whisper_full_get_segment_speaker_turn_next, 1);
  rb_define_method(cContext, "full_get_segment_text", ruby_whisper_full_get_segment_text, 1);
  rb_define_method(cContext, "full_get_segment_no_speech_prob", ruby_whisper_full_get_segment_no_speech_prob, 1);
  rb_define_method(cContext, "full", ruby_whisper_full, -1);
  rb_define_method(cContext, "full_parallel", ruby_whisper_full_parallel, -1);

  // High level API
  rb_define_method(cContext, "full_get_segment", ruby_whisper_full_get_segment, 1);
  rb_define_method(cContext, "each_segment", ruby_whisper_each_segment, 0);

  rb_define_method(cContext, "model", ruby_whisper_get_model, 0);
}
|
52
bindings/ruby/ext/ruby_whisper_error.c
Normal file
52
bindings/ruby/ext/ruby_whisper_error.c
Normal file
@ -0,0 +1,52 @@
|
|||||||
|
#include <ruby.h>
|
||||||
|
|
||||||
|
extern VALUE eError;
|
||||||
|
|
||||||
|
/*
 * Initialize a Whisper::Error from the integer status returned by
 * whisper_full() and friends: sets the exception message from the code and
 * stores the original code in @code.
 */
VALUE ruby_whisper_error_initialize(VALUE self, VALUE code)
{
  const int c_code = NUM2INT(code);
  const char *raw_message;
  switch (c_code) {
  case -2:
    raw_message = "failed to compute log mel spectrogram";
    break;
  case -3:
    raw_message = "failed to auto-detect language";
    break;
  case -4:
    raw_message = "too many decoders requested";
    break;
  case -5:
    raw_message = "audio_ctx is larger than the maximum allowed";
    break;
  case -6:
    raw_message = "failed to encode";
    break;
  case -7:
    raw_message = "whisper_kv_cache_init() failed for self-attention cache";
    break;
  case -8: // fall through: both decode stages report the same failure
  case -9:
    raw_message = "failed to decode";
    break;
  default:
    raw_message = "unknown error";
    break;
  }
  const VALUE message = rb_str_new2(raw_message);
  rb_call_super(1, &message); // StandardError#initialize(message)
  rb_iv_set(self, "@code", code);

  return self;
}
|
||||||
|
|
||||||
|
// Define Whisper::Error (< StandardError) with a read-only #code attribute.
void
init_ruby_whisper_error(VALUE *mWhisper)
{
  eError = rb_define_class_under(*mWhisper, "Error", rb_eStandardError);

  // attr_reader :code — set only from C in #initialize.
  rb_define_attr(eError, "code", true, false);
  rb_define_method(eError, "initialize", ruby_whisper_error_initialize, 1);
}
|
210
bindings/ruby/ext/ruby_whisper_model.c
Normal file
210
bindings/ruby/ext/ruby_whisper_model.c
Normal file
@ -0,0 +1,210 @@
|
|||||||
|
#include <ruby.h>
|
||||||
|
#include "ruby_whisper.h"
|
||||||
|
|
||||||
|
extern VALUE cModel;
|
||||||
|
|
||||||
|
// GC mark function: a Whisper::Model must keep its owning Whisper::Context
// object alive, since all model queries dereference it.
static void rb_whisper_model_mark(ruby_whisper_model *wrapper) {
  rb_gc_mark(wrapper->context);
}
|
||||||
|
|
||||||
|
// Allocator for Whisper::Model.
//
// The context slot must be initialized here: ALLOC returns uninitialized
// memory, and the GC may invoke rb_whisper_model_mark() before
// rb_whisper_model_initialize() assigns a real context, which would otherwise
// rb_gc_mark() a garbage VALUE. rb_gc_mark() is a no-op on Qnil.
static VALUE ruby_whisper_model_allocate(VALUE klass) {
  ruby_whisper_model *rwm;
  rwm = ALLOC(ruby_whisper_model);
  rwm->context = Qnil;
  return Data_Wrap_Struct(klass, rb_whisper_model_mark, RUBY_DEFAULT_FREE, rwm);
}
|
||||||
|
|
||||||
|
// Build a Whisper::Model Ruby object pointing back at +context+ (a
// Whisper::Context VALUE). Shared with the other translation units.
VALUE rb_whisper_model_initialize(VALUE context) {
  const VALUE model = ruby_whisper_model_allocate(cModel);
  ruby_whisper_model *rwm = NULL;
  Data_Get_Struct(model, ruby_whisper_model, rwm);
  rwm->context = context;
  return model;
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* n_vocab -> Integer
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_model_n_vocab(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper_model *rwm;
|
||||||
|
Data_Get_Struct(self, ruby_whisper_model, rwm);
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(rwm->context, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_vocab(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* n_audio_ctx -> Integer
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_model_n_audio_ctx(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper_model *rwm;
|
||||||
|
Data_Get_Struct(self, ruby_whisper_model, rwm);
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(rwm->context, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_audio_ctx(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* n_audio_state -> Integer
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_model_n_audio_state(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper_model *rwm;
|
||||||
|
Data_Get_Struct(self, ruby_whisper_model, rwm);
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(rwm->context, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_audio_state(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* n_audio_head -> Integer
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_model_n_audio_head(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper_model *rwm;
|
||||||
|
Data_Get_Struct(self, ruby_whisper_model, rwm);
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(rwm->context, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_audio_head(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* n_audio_layer -> Integer
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_model_n_audio_layer(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper_model *rwm;
|
||||||
|
Data_Get_Struct(self, ruby_whisper_model, rwm);
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(rwm->context, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_audio_layer(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* n_text_ctx -> Integer
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_model_n_text_ctx(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper_model *rwm;
|
||||||
|
Data_Get_Struct(self, ruby_whisper_model, rwm);
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(rwm->context, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_text_ctx(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* n_text_state -> Integer
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_model_n_text_state(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper_model *rwm;
|
||||||
|
Data_Get_Struct(self, ruby_whisper_model, rwm);
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(rwm->context, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_text_state(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* n_text_head -> Integer
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_model_n_text_head(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper_model *rwm;
|
||||||
|
Data_Get_Struct(self, ruby_whisper_model, rwm);
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(rwm->context, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_text_head(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* n_text_layer -> Integer
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_model_n_text_layer(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper_model *rwm;
|
||||||
|
Data_Get_Struct(self, ruby_whisper_model, rwm);
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(rwm->context, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_text_layer(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* n_mels -> Integer
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_model_n_mels(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper_model *rwm;
|
||||||
|
Data_Get_Struct(self, ruby_whisper_model, rwm);
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(rwm->context, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_n_mels(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* ftype -> Integer
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_model_ftype(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper_model *rwm;
|
||||||
|
Data_Get_Struct(self, ruby_whisper_model, rwm);
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(rwm->context, ruby_whisper, rw);
|
||||||
|
return INT2NUM(whisper_model_ftype(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* type -> String
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_model_type(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper_model *rwm;
|
||||||
|
Data_Get_Struct(self, ruby_whisper_model, rwm);
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(rwm->context, ruby_whisper, rw);
|
||||||
|
return rb_str_new2(whisper_model_type_readable(rw->context));
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
init_ruby_whisper_model(VALUE *mWhisper)
|
||||||
|
{
|
||||||
|
cModel = rb_define_class_under(*mWhisper, "Model", rb_cObject);
|
||||||
|
|
||||||
|
rb_define_alloc_func(cModel, ruby_whisper_model_allocate);
|
||||||
|
rb_define_method(cModel, "n_vocab", ruby_whisper_model_n_vocab, 0);
|
||||||
|
rb_define_method(cModel, "n_audio_ctx", ruby_whisper_model_n_audio_ctx, 0);
|
||||||
|
rb_define_method(cModel, "n_audio_state", ruby_whisper_model_n_audio_state, 0);
|
||||||
|
rb_define_method(cModel, "n_audio_head", ruby_whisper_model_n_audio_head, 0);
|
||||||
|
rb_define_method(cModel, "n_audio_layer", ruby_whisper_model_n_audio_layer, 0);
|
||||||
|
rb_define_method(cModel, "n_text_ctx", ruby_whisper_model_n_text_ctx, 0);
|
||||||
|
rb_define_method(cModel, "n_text_state", ruby_whisper_model_n_text_state, 0);
|
||||||
|
rb_define_method(cModel, "n_text_head", ruby_whisper_model_n_text_head, 0);
|
||||||
|
rb_define_method(cModel, "n_text_layer", ruby_whisper_model_n_text_layer, 0);
|
||||||
|
rb_define_method(cModel, "n_mels", ruby_whisper_model_n_mels, 0);
|
||||||
|
rb_define_method(cModel, "ftype", ruby_whisper_model_ftype, 0);
|
||||||
|
rb_define_method(cModel, "type", ruby_whisper_model_type, 0);
|
||||||
|
}
|
1077
bindings/ruby/ext/ruby_whisper_params.c
Normal file
1077
bindings/ruby/ext/ruby_whisper_params.c
Normal file
File diff suppressed because it is too large
Load Diff
123
bindings/ruby/ext/ruby_whisper_segment.c
Normal file
123
bindings/ruby/ext/ruby_whisper_segment.c
Normal file
@ -0,0 +1,123 @@
|
|||||||
|
#include <ruby.h>
|
||||||
|
#include "ruby_whisper.h"
|
||||||
|
|
||||||
|
extern VALUE cSegment;
|
||||||
|
|
||||||
|
static void
|
||||||
|
rb_whisper_segment_mark(ruby_whisper_segment *rws)
|
||||||
|
{
|
||||||
|
rb_gc_mark(rws->context);
|
||||||
|
}
|
||||||
|
|
||||||
|
VALUE
|
||||||
|
ruby_whisper_segment_allocate(VALUE klass)
|
||||||
|
{
|
||||||
|
ruby_whisper_segment *rws;
|
||||||
|
rws = ALLOC(ruby_whisper_segment);
|
||||||
|
return Data_Wrap_Struct(klass, rb_whisper_segment_mark, RUBY_DEFAULT_FREE, rws);
|
||||||
|
}
|
||||||
|
|
||||||
|
VALUE
|
||||||
|
rb_whisper_segment_initialize(VALUE context, int index)
|
||||||
|
{
|
||||||
|
ruby_whisper_segment *rws;
|
||||||
|
const VALUE segment = ruby_whisper_segment_allocate(cSegment);
|
||||||
|
Data_Get_Struct(segment, ruby_whisper_segment, rws);
|
||||||
|
rws->context = context;
|
||||||
|
rws->index = index;
|
||||||
|
return segment;
|
||||||
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Start time in milliseconds.
|
||||||
|
*
|
||||||
|
* call-seq:
|
||||||
|
* start_time -> Integer
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_segment_get_start_time(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper_segment *rws;
|
||||||
|
Data_Get_Struct(self, ruby_whisper_segment, rws);
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(rws->context, ruby_whisper, rw);
|
||||||
|
const int64_t t0 = whisper_full_get_segment_t0(rw->context, rws->index);
|
||||||
|
// able to multiply 10 without overflow because to_timestamp() in whisper.cpp does it
|
||||||
|
return INT2NUM(t0 * 10);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* End time in milliseconds.
|
||||||
|
*
|
||||||
|
* call-seq:
|
||||||
|
* end_time -> Integer
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_segment_get_end_time(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper_segment *rws;
|
||||||
|
Data_Get_Struct(self, ruby_whisper_segment, rws);
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(rws->context, ruby_whisper, rw);
|
||||||
|
const int64_t t1 = whisper_full_get_segment_t1(rw->context, rws->index);
|
||||||
|
// able to multiply 10 without overflow because to_timestamp() in whisper.cpp does it
|
||||||
|
return INT2NUM(t1 * 10);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Whether the next segment is predicted as a speaker turn.
|
||||||
|
*
|
||||||
|
* call-seq:
|
||||||
|
* speaker_turn_next? -> bool
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_segment_get_speaker_turn_next(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper_segment *rws;
|
||||||
|
Data_Get_Struct(self, ruby_whisper_segment, rws);
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(rws->context, ruby_whisper, rw);
|
||||||
|
return whisper_full_get_segment_speaker_turn_next(rw->context, rws->index) ? Qtrue : Qfalse;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* text -> String
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_segment_get_text(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper_segment *rws;
|
||||||
|
Data_Get_Struct(self, ruby_whisper_segment, rws);
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(rws->context, ruby_whisper, rw);
|
||||||
|
const char * text = whisper_full_get_segment_text(rw->context, rws->index);
|
||||||
|
return rb_str_new2(text);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* call-seq:
|
||||||
|
* no_speech_prob -> Float
|
||||||
|
*/
|
||||||
|
static VALUE
|
||||||
|
ruby_whisper_segment_get_no_speech_prob(VALUE self)
|
||||||
|
{
|
||||||
|
ruby_whisper_segment *rws;
|
||||||
|
Data_Get_Struct(self, ruby_whisper_segment, rws);
|
||||||
|
ruby_whisper *rw;
|
||||||
|
Data_Get_Struct(rws->context, ruby_whisper, rw);
|
||||||
|
return DBL2NUM(whisper_full_get_segment_no_speech_prob(rw->context, rws->index));
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
init_ruby_whisper_segment(VALUE *mWhisper, VALUE *cContext)
|
||||||
|
{
|
||||||
|
cSegment = rb_define_class_under(*mWhisper, "Segment", rb_cObject);
|
||||||
|
|
||||||
|
rb_define_alloc_func(cSegment, ruby_whisper_segment_allocate);
|
||||||
|
rb_define_method(cSegment, "start_time", ruby_whisper_segment_get_start_time, 0);
|
||||||
|
rb_define_method(cSegment, "end_time", ruby_whisper_segment_get_end_time, 0);
|
||||||
|
rb_define_method(cSegment, "speaker_next_turn?", ruby_whisper_segment_get_speaker_turn_next, 0);
|
||||||
|
rb_define_method(cSegment, "text", ruby_whisper_segment_get_text, 0);
|
||||||
|
rb_define_method(cSegment, "no_speech_prob", ruby_whisper_segment_get_no_speech_prob, 0);
|
||||||
|
}
|
83
bindings/ruby/ext/ruby_whisper_transcribe.cpp
Normal file
83
bindings/ruby/ext/ruby_whisper_transcribe.cpp
Normal file
@ -0,0 +1,83 @@
|
|||||||
|
#include <ruby.h>
|
||||||
|
#include "ruby_whisper.h"
|
||||||
|
#include "common-whisper.h"
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
extern ID id_to_s;
|
||||||
|
extern ID id_call;
|
||||||
|
|
||||||
|
extern void
|
||||||
|
register_callbacks(ruby_whisper_params * rwp, VALUE * self);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* transcribe a single file
|
||||||
|
* can emit to a block results
|
||||||
|
*
|
||||||
|
* params = Whisper::Params.new
|
||||||
|
* params.duration = 60_000
|
||||||
|
* whisper.transcribe "path/to/audio.wav", params do |text|
|
||||||
|
* puts text
|
||||||
|
* end
|
||||||
|
*
|
||||||
|
* call-seq:
|
||||||
|
* transcribe(path_to_audio, params) {|text| ...}
|
||||||
|
**/
|
||||||
|
VALUE
|
||||||
|
ruby_whisper_transcribe(int argc, VALUE *argv, VALUE self) {
|
||||||
|
ruby_whisper *rw;
|
||||||
|
ruby_whisper_params *rwp;
|
||||||
|
VALUE wave_file_path, blk, params;
|
||||||
|
|
||||||
|
rb_scan_args(argc, argv, "02&", &wave_file_path, ¶ms, &blk);
|
||||||
|
Data_Get_Struct(self, ruby_whisper, rw);
|
||||||
|
Data_Get_Struct(params, ruby_whisper_params, rwp);
|
||||||
|
|
||||||
|
if (!rb_respond_to(wave_file_path, id_to_s)) {
|
||||||
|
rb_raise(rb_eRuntimeError, "Expected file path to wave file");
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string fname_inp = StringValueCStr(wave_file_path);
|
||||||
|
|
||||||
|
std::vector<float> pcmf32; // mono-channel F32 PCM
|
||||||
|
std::vector<std::vector<float>> pcmf32s; // stereo-channel F32 PCM
|
||||||
|
|
||||||
|
if (!read_audio_data(fname_inp, pcmf32, pcmf32s, rwp->diarize)) {
|
||||||
|
fprintf(stderr, "error: failed to open '%s' as WAV file\n", fname_inp.c_str());
|
||||||
|
return self;
|
||||||
|
}
|
||||||
|
{
|
||||||
|
static bool is_aborted = false; // NOTE: this should be atomic to avoid data race
|
||||||
|
|
||||||
|
rwp->params.encoder_begin_callback = [](struct whisper_context * /*ctx*/, struct whisper_state * /*state*/, void * user_data) {
|
||||||
|
bool is_aborted = *(bool*)user_data;
|
||||||
|
return !is_aborted;
|
||||||
|
};
|
||||||
|
rwp->params.encoder_begin_callback_user_data = &is_aborted;
|
||||||
|
}
|
||||||
|
|
||||||
|
register_callbacks(rwp, &self);
|
||||||
|
|
||||||
|
if (whisper_full_parallel(rw->context, rwp->params, pcmf32.data(), pcmf32.size(), 1) != 0) {
|
||||||
|
fprintf(stderr, "failed to process audio\n");
|
||||||
|
return self;
|
||||||
|
}
|
||||||
|
const int n_segments = whisper_full_n_segments(rw->context);
|
||||||
|
VALUE output = rb_str_new2("");
|
||||||
|
for (int i = 0; i < n_segments; ++i) {
|
||||||
|
const char * text = whisper_full_get_segment_text(rw->context, i);
|
||||||
|
output = rb_str_concat(output, rb_str_new2(text));
|
||||||
|
}
|
||||||
|
VALUE idCall = id_call;
|
||||||
|
if (blk != Qnil) {
|
||||||
|
rb_funcall(blk, idCall, 1, output);
|
||||||
|
}
|
||||||
|
return self;
|
||||||
|
}
|
||||||
|
#ifdef __cplusplus
|
||||||
|
}
|
||||||
|
#endif
|
@ -1,2 +0,0 @@
|
|||||||
require "whisper.so"
|
|
||||||
require "whisper/model/uri"
|
|
@ -1,157 +1,170 @@
|
|||||||
require "whisper.so"
|
|
||||||
require "uri"
|
require "uri"
|
||||||
require "net/http"
|
require "net/http"
|
||||||
require "time"
|
require "time"
|
||||||
require "pathname"
|
require "pathname"
|
||||||
require "io/console/size"
|
require "io/console/size"
|
||||||
|
|
||||||
class Whisper::Model
|
module Whisper
|
||||||
class URI
|
class Model
|
||||||
def initialize(uri)
|
class URI
|
||||||
@uri = URI(uri)
|
def initialize(uri)
|
||||||
end
|
@uri = URI(uri)
|
||||||
|
end
|
||||||
|
|
||||||
def to_path
|
def to_path
|
||||||
cache
|
cache
|
||||||
cache_path.to_path
|
cache_path.to_path
|
||||||
end
|
end
|
||||||
|
|
||||||
def clear_cache
|
def clear_cache
|
||||||
path = cache_path
|
path = cache_path
|
||||||
path.delete if path.exist?
|
path.delete if path.exist?
|
||||||
end
|
end
|
||||||
|
|
||||||
private
|
private
|
||||||
|
|
||||||
def cache_path
|
def cache_path
|
||||||
base_cache_dir/@uri.host/@uri.path[1..]
|
base_cache_dir/@uri.host/@uri.path[1..]
|
||||||
end
|
end
|
||||||
|
|
||||||
def base_cache_dir
|
def base_cache_dir
|
||||||
base = case RUBY_PLATFORM
|
base = case RUBY_PLATFORM
|
||||||
when /mswin|mingw/
|
when /mswin|mingw/
|
||||||
ENV.key?("LOCALAPPDATA") ? Pathname(ENV["LOCALAPPDATA"]) : Pathname(Dir.home)/"AppData/Local"
|
ENV.key?("LOCALAPPDATA") ? Pathname(ENV["LOCALAPPDATA"]) : Pathname(Dir.home)/"AppData/Local"
|
||||||
when /darwin/
|
when /darwin/
|
||||||
Pathname(Dir.home)/"Library/Caches"
|
Pathname(Dir.home)/"Library/Caches"
|
||||||
else
|
else
|
||||||
ENV.key?("XDG_CACHE_HOME") ? ENV["XDG_CACHE_HOME"] : Pathname(Dir.home)/".cache"
|
ENV.key?("XDG_CACHE_HOME") ? ENV["XDG_CACHE_HOME"] : Pathname(Dir.home)/".cache"
|
||||||
end
|
end
|
||||||
base/"whisper.cpp"
|
base/"whisper.cpp"
|
||||||
end
|
end
|
||||||
|
|
||||||
def cache
|
def cache
|
||||||
path = cache_path
|
path = cache_path
|
||||||
headers = {}
|
headers = {}
|
||||||
headers["if-modified-since"] = path.mtime.httpdate if path.exist?
|
headers["if-modified-since"] = path.mtime.httpdate if path.exist?
|
||||||
request @uri, headers
|
request @uri, headers
|
||||||
path
|
path
|
||||||
end
|
end
|
||||||
|
|
||||||
def request(uri, headers)
|
def request(uri, headers)
|
||||||
Net::HTTP.start uri.host, uri.port, use_ssl: uri.scheme == "https" do |http|
|
Net::HTTP.start uri.host, uri.port, use_ssl: uri.scheme == "https" do |http|
|
||||||
request = Net::HTTP::Get.new(uri, headers)
|
request = Net::HTTP::Get.new(uri, headers)
|
||||||
http.request request do |response|
|
http.request request do |response|
|
||||||
case response
|
case response
|
||||||
when Net::HTTPNotModified
|
when Net::HTTPNotModified
|
||||||
# noop
|
# noop
|
||||||
when Net::HTTPOK
|
when Net::HTTPOK
|
||||||
download response
|
download response
|
||||||
when Net::HTTPRedirection
|
when Net::HTTPRedirection
|
||||||
request URI(response["location"]), headers
|
request URI(response["location"]), headers
|
||||||
else
|
else
|
||||||
return if headers.key?("if-modified-since") # Use cache file
|
return if headers.key?("if-modified-since") # Use cache file
|
||||||
|
|
||||||
raise "#{response.code} #{response.message}\n#{response.body}"
|
raise "#{response.code} #{response.message}\n#{response.body}"
|
||||||
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
rescue => err
|
||||||
end
|
if cache_path.exist?
|
||||||
|
warn err
|
||||||
def download(response)
|
# Use cache file
|
||||||
path = cache_path
|
else
|
||||||
path.dirname.mkpath unless path.dirname.exist?
|
raise
|
||||||
downloading_path = Pathname("#{path}.downloading")
|
|
||||||
size = response.content_length
|
|
||||||
downloading_path.open "wb" do |file|
|
|
||||||
downloaded = 0
|
|
||||||
response.read_body do |chunk|
|
|
||||||
file << chunk
|
|
||||||
downloaded += chunk.bytesize
|
|
||||||
show_progress downloaded, size
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
downloading_path.rename path
|
|
||||||
end
|
|
||||||
|
|
||||||
def show_progress(current, size)
|
def download(response)
|
||||||
return unless $stderr.tty?
|
path = cache_path
|
||||||
return unless size
|
path.dirname.mkpath unless path.dirname.exist?
|
||||||
|
downloading_path = Pathname("#{path}.downloading")
|
||||||
unless @prev
|
size = response.content_length
|
||||||
@prev = Time.now
|
downloading_path.open "wb" do |file|
|
||||||
$stderr.puts "Downloading #{@uri}"
|
downloaded = 0
|
||||||
|
response.read_body do |chunk|
|
||||||
|
file << chunk
|
||||||
|
downloaded += chunk.bytesize
|
||||||
|
show_progress downloaded, size
|
||||||
|
end
|
||||||
|
$stderr.puts
|
||||||
|
end
|
||||||
|
downloading_path.rename path
|
||||||
end
|
end
|
||||||
|
|
||||||
now = Time.now
|
def show_progress(current, size)
|
||||||
return if now - @prev < 1 && current < size
|
progress_rate_available = size && $stderr.tty?
|
||||||
|
|
||||||
progress_width = 20
|
unless @prev
|
||||||
progress = current.to_f / size
|
@prev = Time.now
|
||||||
arrow_length = progress * progress_width
|
$stderr.puts "Downloading #{@uri} to #{cache_path}"
|
||||||
arrow = "=" * (arrow_length - 1) + ">" + " " * (progress_width - arrow_length)
|
end
|
||||||
line = "[#{arrow}] (#{format_bytesize(current)} / #{format_bytesize(size)})"
|
|
||||||
padding = ' ' * ($stderr.winsize[1] - line.size)
|
now = Time.now
|
||||||
$stderr.print "\r#{line}#{padding}"
|
|
||||||
$stderr.puts if current >= size
|
if progress_rate_available
|
||||||
@prev = now
|
return if now - @prev < 1 && current < size
|
||||||
|
|
||||||
|
progress_width = 20
|
||||||
|
progress = current.to_f / size
|
||||||
|
arrow_length = progress * progress_width
|
||||||
|
arrow = "=" * (arrow_length - 1) + ">" + " " * (progress_width - arrow_length)
|
||||||
|
line = "[#{arrow}] (#{format_bytesize(current)} / #{format_bytesize(size)})"
|
||||||
|
padding = ' ' * ($stderr.winsize[1] - line.size)
|
||||||
|
$stderr.print "\r#{line}#{padding}"
|
||||||
|
else
|
||||||
|
return if now - @prev < 1
|
||||||
|
|
||||||
|
$stderr.print "."
|
||||||
|
end
|
||||||
|
@prev = now
|
||||||
|
end
|
||||||
|
|
||||||
|
def format_bytesize(bytesize)
|
||||||
|
return "0.0 B" if bytesize.zero?
|
||||||
|
|
||||||
|
units = %w[B KiB MiB GiB TiB]
|
||||||
|
exp = (Math.log(bytesize) / Math.log(1024)).to_i
|
||||||
|
format("%.1f %s", bytesize.to_f / 1024 ** exp, units[exp])
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def format_bytesize(bytesize)
|
@pre_converted_models = %w[
|
||||||
return "0.0 B" if bytesize.zero?
|
tiny
|
||||||
|
tiny.en
|
||||||
|
tiny-q5_1
|
||||||
|
tiny.en-q5_1
|
||||||
|
tiny-q8_0
|
||||||
|
base
|
||||||
|
base.en
|
||||||
|
base-q5_1
|
||||||
|
base.en-q5_1
|
||||||
|
base-q8_0
|
||||||
|
small
|
||||||
|
small.en
|
||||||
|
small.en-tdrz
|
||||||
|
small-q5_1
|
||||||
|
small.en-q5_1
|
||||||
|
small-q8_0
|
||||||
|
medium
|
||||||
|
medium.en
|
||||||
|
medium-q5_0
|
||||||
|
medium.en-q5_0
|
||||||
|
medium-q8_0
|
||||||
|
large-v1
|
||||||
|
large-v2
|
||||||
|
large-v2-q5_0
|
||||||
|
large-v2-q8_0
|
||||||
|
large-v3
|
||||||
|
large-v3-q5_0
|
||||||
|
large-v3-turbo
|
||||||
|
large-v3-turbo-q5_0
|
||||||
|
large-v3-turbo-q8_0
|
||||||
|
].each_with_object({}) {|name, models|
|
||||||
|
models[name] = URI.new("https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-#{name}.bin")
|
||||||
|
}
|
||||||
|
|
||||||
units = %w[B KiB MiB GiB TiB]
|
class << self
|
||||||
exp = (Math.log(bytesize) / Math.log(1024)).to_i
|
attr_reader :pre_converted_models
|
||||||
format("%.1f %s", bytesize.to_f / 1024 ** exp, units[exp])
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
@pre_converted_models = {}
|
|
||||||
%w[
|
|
||||||
tiny
|
|
||||||
tiny.en
|
|
||||||
tiny-q5_1
|
|
||||||
tiny.en-q5_1
|
|
||||||
tiny-q8_0
|
|
||||||
base
|
|
||||||
base.en
|
|
||||||
base-q5_1
|
|
||||||
base.en-q5_1
|
|
||||||
base-q8_0
|
|
||||||
small
|
|
||||||
small.en
|
|
||||||
small.en-tdrz
|
|
||||||
small-q5_1
|
|
||||||
small.en-q5_1
|
|
||||||
small-q8_0
|
|
||||||
medium
|
|
||||||
medium.en
|
|
||||||
medium-q5_0
|
|
||||||
medium.en-q5_0
|
|
||||||
medium-q8_0
|
|
||||||
large-v1
|
|
||||||
large-v2
|
|
||||||
large-v2-q5_0
|
|
||||||
large-v2-q8_0
|
|
||||||
large-v3
|
|
||||||
large-v3-q5_0
|
|
||||||
large-v3-turbo
|
|
||||||
large-v3-turbo-q5_0
|
|
||||||
large-v3-turbo-q8_0
|
|
||||||
].each do |name|
|
|
||||||
@pre_converted_models[name] = URI.new("https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-#{name}.bin")
|
|
||||||
end
|
|
||||||
|
|
||||||
class << self
|
|
||||||
attr_reader :pre_converted_models
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
|
189
bindings/ruby/sig/whisper.rbs
Normal file
189
bindings/ruby/sig/whisper.rbs
Normal file
@ -0,0 +1,189 @@
|
|||||||
|
module Whisper
|
||||||
|
interface _Samples
|
||||||
|
def length: () -> Integer
|
||||||
|
def each: { (Float) -> void } -> void
|
||||||
|
end
|
||||||
|
|
||||||
|
type log_callback = ^(Integer level, String message, Object user_data) -> void
|
||||||
|
type new_segment_callback = ^(Whisper::Context, void, Integer n_new, Object user_data) -> void
|
||||||
|
type progress_callback = ^(Whisper::Context, void, Integer progress, Object user_data) -> void
|
||||||
|
type abort_callback = ^(Whisper::Context, void, Object user_data) -> boolish
|
||||||
|
|
||||||
|
LOG_LEVEL_NONE: Integer
|
||||||
|
LOG_LEVEL_INFO: Integer
|
||||||
|
LOG_LEVEL_WARN: Integer
|
||||||
|
LOG_LEVEL_ERROR: Integer
|
||||||
|
LOG_LEVEL_DEBUG: Integer
|
||||||
|
LOG_LEVEL_CONT: Integer
|
||||||
|
|
||||||
|
def self.lang_max_id: () -> Integer
|
||||||
|
def self.lang_id: (string name) -> Integer
|
||||||
|
def self.lang_str: (Integer id) -> String
|
||||||
|
def self.lang_str_full: (Integer id) -> String
|
||||||
|
def self.log_set: (log_callback, Object? user_data) -> log_callback
|
||||||
|
|
||||||
|
class Context
|
||||||
|
def self.new: (string | _ToPath | ::URI::HTTP) -> instance
|
||||||
|
def transcribe: (string, Params) -> self
|
||||||
|
| (string, Params) { (String) -> void } -> self
|
||||||
|
def model_n_vocab: () -> Integer
|
||||||
|
def model_n_audio_ctx: () -> Integer
|
||||||
|
def model_n_audio_state: () -> Integer
|
||||||
|
def model_n_text_head: () -> Integer
|
||||||
|
def model_n_text_layer: () -> Integer
|
||||||
|
def model_n_mels: () -> Integer
|
||||||
|
def model_ftype: () -> Integer
|
||||||
|
def model_type: () -> String
|
||||||
|
def each_segment: { (Segment) -> void } -> void
|
||||||
|
| () -> Enumerator[Segment]
|
||||||
|
def model: () -> Model
|
||||||
|
def full_get_segment: (Integer nth) -> Segment
|
||||||
|
def full_n_segments: () -> Integer
|
||||||
|
def full_lang_id: () -> Integer
|
||||||
|
def full_get_segment_t0: (Integer) -> Integer
|
||||||
|
def full_get_segment_t1: (Integer) -> Integer
|
||||||
|
def full_get_segment_speaker_turn_next: (Integer) -> (true | false)
|
||||||
|
def full_get_segment_text: (Integer) -> String
|
||||||
|
def full_get_segment_no_speech_prob: (Integer) -> Float
|
||||||
|
def full: (Params, Array[Float] samples, ?Integer n_samples) -> self
|
||||||
|
| (Params, _Samples, ?Integer n_samples) -> self
|
||||||
|
def full_parallel: (Params, Array[Float], ?Integer n_samples) -> self
|
||||||
|
| (Params, _Samples, ?Integer n_samples) -> self
|
||||||
|
| (Params, _Samples, ?Integer? n_samples, Integer n_processors) -> self
|
||||||
|
end
|
||||||
|
|
||||||
|
class Params
|
||||||
|
def self.new: (
|
||||||
|
?language: string,
|
||||||
|
?translate: boolish,
|
||||||
|
?no_context: boolish,
|
||||||
|
?single_segment: boolish,
|
||||||
|
?print_special: boolish,
|
||||||
|
?print_progress: boolish,
|
||||||
|
?print_realtime: boolish,
|
||||||
|
?print_timestamps: boolish,
|
||||||
|
?suppress_blank: boolish,
|
||||||
|
?suppress_nst: boolish,
|
||||||
|
?token_timestamps: boolish,
|
||||||
|
?split_on_word: boolish,
|
||||||
|
?initial_prompt: string | nil,
|
||||||
|
?diarize: boolish,
|
||||||
|
?offset: Integer,
|
||||||
|
?duration: Integer,
|
||||||
|
?max_text_tokens: Integer,
|
||||||
|
?temperature: Float,
|
||||||
|
?max_initial_ts: Float,
|
||||||
|
?length_penalty: Float,
|
||||||
|
?temperature_inc: Float,
|
||||||
|
?entropy_thold: Float,
|
||||||
|
?logprob_thold: Float,
|
||||||
|
?no_speech_thold: Float,
|
||||||
|
?new_segment_callback: new_segment_callback,
|
||||||
|
?new_segment_callback_user_data: Object,
|
||||||
|
?progress_callback: progress_callback,
|
||||||
|
?progress_callback_user_data: Object,
|
||||||
|
?abort_callback: abort_callback,
|
||||||
|
?abort_callback_user_data: Object
|
||||||
|
) -> instance
|
||||||
|
def language=: (String) -> String # TODO: Enumerate lang names
|
||||||
|
def language: () -> String
|
||||||
|
def translate=: (boolish) -> boolish
|
||||||
|
def translate: () -> (true | false)
|
||||||
|
def no_context=: (boolish) -> boolish
|
||||||
|
def no_context: () -> (true | false)
|
||||||
|
def single_segment=: (boolish) -> boolish
|
||||||
|
def single_segment: () -> (true | false)
|
||||||
|
def print_special=: (boolish) -> boolish
|
||||||
|
def print_special: () -> (true | false)
|
||||||
|
def print_progress=: (boolish) -> boolish
|
||||||
|
def print_progress: () -> (true | false)
|
||||||
|
def print_realtime=: (boolish) -> boolish
|
||||||
|
def print_realtime: () -> (true | false)
|
||||||
|
def print_timestamps=: (boolish) -> boolish
|
||||||
|
def print_timestamps: () -> (true | false)
|
||||||
|
def suppress_blank=: (boolish) -> boolish
|
||||||
|
def suppress_blank: () -> (true | false)
|
||||||
|
def suppress_nst=: (boolish) -> boolish
|
||||||
|
def suppress_nst: () -> (true | false)
|
||||||
|
def token_timestamps=: (boolish) -> boolish
|
||||||
|
def token_timestamps: () -> (true | false)
|
||||||
|
def split_on_word=: (boolish) -> boolish
|
||||||
|
def split_on_word: () -> (true | false)
|
||||||
|
def initial_prompt=: (_ToS) -> _ToS
|
||||||
|
def initial_prompt: () -> (String | nil)
|
||||||
|
def diarize=: (boolish) -> boolish
|
||||||
|
def diarize: () -> (true | false)
|
||||||
|
def offset=: (Integer) -> Integer
|
||||||
|
def offset: () -> Integer
|
||||||
|
def duration=: (Integer) -> Integer
|
||||||
|
def duration: () -> Integer
|
||||||
|
def max_text_tokens=: (Integer) -> Integer
|
||||||
|
def max_text_tokens: () -> Integer
|
||||||
|
def temperature=: (Float) -> Float
|
||||||
|
def temperature: () -> Float
|
||||||
|
def max_initial_ts=: (Float) -> Float
|
||||||
|
def max_initial_ts: () -> Float
|
||||||
|
def length_penalty=: (Float) -> Float
|
||||||
|
def length_penalty: () -> Float
|
||||||
|
def temperature_inc=: (Float) -> Float
|
||||||
|
def temperature_inc: () -> Float
|
||||||
|
def entropy_thold=: (Float) -> Float
|
||||||
|
def entropy_thold: () -> Float
|
||||||
|
def logprob_thold=: (Float) -> Float
|
||||||
|
def logprob_thold: () -> Float
|
||||||
|
def no_speech_thold=: (Float) -> Float
|
||||||
|
def no_speech_thold: () -> Float
|
||||||
|
def new_segment_callback=: (new_segment_callback) -> new_segment_callback
|
||||||
|
def new_segment_callback: () -> (new_segment_callback | nil)
|
||||||
|
def new_segment_callback_user_data=: (Object) -> Object
|
||||||
|
def new_segment_callback_user_data: () -> Object
|
||||||
|
def progress_callback=: (progress_callback) -> progress_callback
|
||||||
|
def progress_callback: () -> (progress_callback | nil)
|
||||||
|
def progress_callback_user_data=: (Object) -> Object
|
||||||
|
def progress_callback_user_data: () -> Object
|
||||||
|
def abort_callback=: (abort_callback) -> abort_callback
|
||||||
|
def abort_callback: () -> (abort_callback | nil)
|
||||||
|
def abort_callback_user_data=: (Object) -> Object
|
||||||
|
def abort_callback_user_data: () -> Object
|
||||||
|
def on_new_segment: { (Segment) -> void } -> void
|
||||||
|
def on_progress: { (Integer progress) -> void } -> void
|
||||||
|
def abort_on: { (Object user_data) -> boolish } -> void
|
||||||
|
end
|
||||||
|
|
||||||
|
class Model
|
||||||
|
def self.pre_converted_models: () -> Hash[String, Model::URI]
|
||||||
|
def self.new: () -> instance
|
||||||
|
def n_vocab: () -> Integer
|
||||||
|
def n_audio_ctx: () -> Integer
|
||||||
|
def n_audio_state: () -> Integer
|
||||||
|
def n_audio_head: () -> Integer
|
||||||
|
def n_audio_layer: () -> Integer
|
||||||
|
def n_text_ctx: () -> Integer
|
||||||
|
def n_text_state: () -> Integer
|
||||||
|
def n_text_head: () -> Integer
|
||||||
|
def n_text_layer: () -> Integer
|
||||||
|
def n_mels: () -> Integer
|
||||||
|
def ftype: () -> Integer
|
||||||
|
def type: () -> String
|
||||||
|
|
||||||
|
class URI
|
||||||
|
def self.new: (string | ::URI::HTTP) -> self
|
||||||
|
def to_path: -> String
|
||||||
|
def clear_cache: -> void
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
class Segment
|
||||||
|
def start_time: () -> Integer
|
||||||
|
def end_time: () -> Integer
|
||||||
|
def speaker_next_turn?: () -> (true | false)
|
||||||
|
def text: () -> String
|
||||||
|
def no_speech_prob: () -> Float
|
||||||
|
end
|
||||||
|
|
||||||
|
class Error < StandardError
|
||||||
|
attr_reader code: Integer
|
||||||
|
|
||||||
|
def self.new: (Integer code) -> instance
|
||||||
|
end
|
||||||
|
end
|
@ -4,4 +4,21 @@ require_relative "jfk_reader/jfk_reader"
|
|||||||
|
|
||||||
class TestBase < Test::Unit::TestCase
|
class TestBase < Test::Unit::TestCase
|
||||||
AUDIO = File.join(__dir__, "..", "..", "..", "samples", "jfk.wav")
|
AUDIO = File.join(__dir__, "..", "..", "..", "samples", "jfk.wav")
|
||||||
|
|
||||||
|
class << self
|
||||||
|
attr_reader :whisper
|
||||||
|
|
||||||
|
def startup
|
||||||
|
@whisper = Whisper::Context.new("base.en")
|
||||||
|
params = Whisper::Params.new
|
||||||
|
params.print_timestamps = false
|
||||||
|
@whisper.transcribe(TestBase::AUDIO, params)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
def whisper
|
||||||
|
self.class.whisper
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
@ -68,4 +68,42 @@ class TestModel < TestBase
|
|||||||
assert_path_exist path
|
assert_path_exist path
|
||||||
assert_equal 147964211, File.size(path)
|
assert_equal 147964211, File.size(path)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def test_uri_string
|
||||||
|
path = "https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin"
|
||||||
|
whisper = Whisper::Context.new(path)
|
||||||
|
model = whisper.model
|
||||||
|
|
||||||
|
assert_equal 51864, model.n_vocab
|
||||||
|
assert_equal 1500, model.n_audio_ctx
|
||||||
|
assert_equal 512, model.n_audio_state
|
||||||
|
assert_equal 8, model.n_audio_head
|
||||||
|
assert_equal 6, model.n_audio_layer
|
||||||
|
assert_equal 448, model.n_text_ctx
|
||||||
|
assert_equal 512, model.n_text_state
|
||||||
|
assert_equal 8, model.n_text_head
|
||||||
|
assert_equal 6, model.n_text_layer
|
||||||
|
assert_equal 80, model.n_mels
|
||||||
|
assert_equal 1, model.ftype
|
||||||
|
assert_equal "base", model.type
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_uri
|
||||||
|
path = URI("https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin")
|
||||||
|
whisper = Whisper::Context.new(path)
|
||||||
|
model = whisper.model
|
||||||
|
|
||||||
|
assert_equal 51864, model.n_vocab
|
||||||
|
assert_equal 1500, model.n_audio_ctx
|
||||||
|
assert_equal 512, model.n_audio_state
|
||||||
|
assert_equal 8, model.n_audio_head
|
||||||
|
assert_equal 6, model.n_audio_layer
|
||||||
|
assert_equal 448, model.n_text_ctx
|
||||||
|
assert_equal 512, model.n_text_state
|
||||||
|
assert_equal 8, model.n_text_head
|
||||||
|
assert_equal 6, model.n_text_layer
|
||||||
|
assert_equal 80, model.n_mels
|
||||||
|
assert_equal 1, model.ftype
|
||||||
|
assert_equal "base", model.type
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
@ -23,7 +23,7 @@ class TestPackage < TestBase
|
|||||||
version = match_data[2]
|
version = match_data[2]
|
||||||
basename = "whisper.#{RbConfig::CONFIG["DLEXT"]}"
|
basename = "whisper.#{RbConfig::CONFIG["DLEXT"]}"
|
||||||
Dir.mktmpdir do |dir|
|
Dir.mktmpdir do |dir|
|
||||||
system "gem", "install", "--install-dir", dir.shellescape, "pkg/#{filename.shellescape}", exception: true
|
system "gem", "install", "--install-dir", dir.shellescape, "--no-document", "pkg/#{filename.shellescape}", exception: true
|
||||||
assert_path_exist File.join(dir, "gems/whispercpp-#{version}/lib", basename)
|
assert_path_exist File.join(dir, "gems/whispercpp-#{version}/lib", basename)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
@ -1,6 +1,39 @@
|
|||||||
require_relative "helper"
|
require_relative "helper"
|
||||||
|
|
||||||
class TestParams < TestBase
|
class TestParams < TestBase
|
||||||
|
PARAM_NAMES = [
|
||||||
|
:language,
|
||||||
|
:translate,
|
||||||
|
:no_context,
|
||||||
|
:single_segment,
|
||||||
|
:print_special,
|
||||||
|
:print_progress,
|
||||||
|
:print_realtime,
|
||||||
|
:print_timestamps,
|
||||||
|
:suppress_blank,
|
||||||
|
:suppress_nst,
|
||||||
|
:token_timestamps,
|
||||||
|
:split_on_word,
|
||||||
|
:initial_prompt,
|
||||||
|
:diarize,
|
||||||
|
:offset,
|
||||||
|
:duration,
|
||||||
|
:max_text_tokens,
|
||||||
|
:temperature,
|
||||||
|
:max_initial_ts,
|
||||||
|
:length_penalty,
|
||||||
|
:temperature_inc,
|
||||||
|
:entropy_thold,
|
||||||
|
:logprob_thold,
|
||||||
|
:no_speech_thold,
|
||||||
|
:new_segment_callback,
|
||||||
|
:new_segment_callback_user_data,
|
||||||
|
:progress_callback,
|
||||||
|
:progress_callback_user_data,
|
||||||
|
:abort_callback,
|
||||||
|
:abort_callback_user_data,
|
||||||
|
]
|
||||||
|
|
||||||
def setup
|
def setup
|
||||||
@params = Whisper::Params.new
|
@params = Whisper::Params.new
|
||||||
end
|
end
|
||||||
@ -89,11 +122,11 @@ class TestParams < TestBase
|
|||||||
assert !@params.suppress_blank
|
assert !@params.suppress_blank
|
||||||
end
|
end
|
||||||
|
|
||||||
def test_suppress_non_speech_tokens
|
def test_suppress_nst
|
||||||
@params.suppress_non_speech_tokens = true
|
@params.suppress_nst = true
|
||||||
assert @params.suppress_non_speech_tokens
|
assert @params.suppress_nst
|
||||||
@params.suppress_non_speech_tokens = false
|
@params.suppress_nst = false
|
||||||
assert !@params.suppress_non_speech_tokens
|
assert !@params.suppress_nst
|
||||||
end
|
end
|
||||||
|
|
||||||
def test_token_timestamps
|
def test_token_timestamps
|
||||||
@ -157,4 +190,57 @@ class TestParams < TestBase
|
|||||||
@params.no_speech_thold = 0.2
|
@params.no_speech_thold = 0.2
|
||||||
assert_in_delta 0.2, @params.no_speech_thold
|
assert_in_delta 0.2, @params.no_speech_thold
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def test_new_with_kw_args
|
||||||
|
params = Whisper::Params.new(language: "es")
|
||||||
|
assert_equal "es", params.language
|
||||||
|
assert_equal 1.0, params.max_initial_ts
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_new_with_kw_args_non_existent
|
||||||
|
assert_raise ArgumentError do
|
||||||
|
Whisper::Params.new(non_existent: "value")
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_new_with_kw_args_wrong_type
|
||||||
|
assert_raise TypeError do
|
||||||
|
Whisper::Params.new(language: 3)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
data(PARAM_NAMES.collect {|param| [param, param]}.to_h)
|
||||||
|
def test_new_with_kw_args_default_values(param)
|
||||||
|
default_value = @params.send(param)
|
||||||
|
value = case [param, default_value]
|
||||||
|
in [*, true | false]
|
||||||
|
!default_value
|
||||||
|
in [*, Integer | Float]
|
||||||
|
default_value + 1
|
||||||
|
in [:language, *]
|
||||||
|
"es"
|
||||||
|
in [:initial_prompt, *]
|
||||||
|
"Initial prompt"
|
||||||
|
in [/_callback\Z/, *]
|
||||||
|
proc {}
|
||||||
|
in [/_user_data\Z/, *]
|
||||||
|
Object.new
|
||||||
|
end
|
||||||
|
params = Whisper::Params.new(param => value)
|
||||||
|
if Float === value
|
||||||
|
assert_in_delta value, params.send(param)
|
||||||
|
else
|
||||||
|
assert_equal value, params.send(param)
|
||||||
|
end
|
||||||
|
|
||||||
|
PARAM_NAMES.reject {|name| name == param}.each do |name|
|
||||||
|
expected = @params.send(name)
|
||||||
|
actual = params.send(name)
|
||||||
|
if Float === expected
|
||||||
|
assert_in_delta expected, actual
|
||||||
|
else
|
||||||
|
assert_equal expected, actual
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
@ -1,17 +1,6 @@
|
|||||||
require_relative "helper"
|
require_relative "helper"
|
||||||
|
|
||||||
class TestSegment < TestBase
|
class TestSegment < TestBase
|
||||||
class << self
|
|
||||||
attr_reader :whisper
|
|
||||||
|
|
||||||
def startup
|
|
||||||
@whisper = Whisper::Context.new("base.en")
|
|
||||||
params = Whisper::Params.new
|
|
||||||
params.print_timestamps = false
|
|
||||||
@whisper.transcribe(TestBase::AUDIO, params)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
def test_iteration
|
def test_iteration
|
||||||
whisper.each_segment do |segment|
|
whisper.each_segment do |segment|
|
||||||
assert_instance_of Whisper::Segment, segment
|
assert_instance_of Whisper::Segment, segment
|
||||||
@ -43,6 +32,14 @@ class TestSegment < TestBase
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def test_no_speech_prob
|
||||||
|
no_speech_prob = nil
|
||||||
|
whisper.each_segment do |segment|
|
||||||
|
no_speech_prob = segment.no_speech_prob
|
||||||
|
end
|
||||||
|
assert no_speech_prob > 0.0
|
||||||
|
end
|
||||||
|
|
||||||
def test_on_new_segment
|
def test_on_new_segment
|
||||||
params = Whisper::Params.new
|
params = Whisper::Params.new
|
||||||
seg = nil
|
seg = nil
|
||||||
@ -74,10 +71,4 @@ class TestSegment < TestBase
|
|||||||
end
|
end
|
||||||
whisper.transcribe(AUDIO, params)
|
whisper.transcribe(AUDIO, params)
|
||||||
end
|
end
|
||||||
|
|
||||||
private
|
|
||||||
|
|
||||||
def whisper
|
|
||||||
self.class.whisper
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
|
@ -21,21 +21,6 @@ class TestWhisper < TestBase
|
|||||||
end
|
end
|
||||||
|
|
||||||
sub_test_case "After transcription" do
|
sub_test_case "After transcription" do
|
||||||
class << self
|
|
||||||
attr_reader :whisper
|
|
||||||
|
|
||||||
def startup
|
|
||||||
@whisper = Whisper::Context.new("base.en")
|
|
||||||
params = Whisper::Params.new
|
|
||||||
params.print_timestamps = false
|
|
||||||
@whisper.transcribe(TestBase::AUDIO, params)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
def whisper
|
|
||||||
self.class.whisper
|
|
||||||
end
|
|
||||||
|
|
||||||
def test_full_n_segments
|
def test_full_n_segments
|
||||||
assert_equal 1, whisper.full_n_segments
|
assert_equal 1, whisper.full_n_segments
|
||||||
end
|
end
|
||||||
@ -44,6 +29,12 @@ class TestWhisper < TestBase
|
|||||||
assert_equal 0, whisper.full_lang_id
|
assert_equal 0, whisper.full_lang_id
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def test_full_get_segment
|
||||||
|
segment = whisper.full_get_segment(0)
|
||||||
|
assert_equal 0, segment.start_time
|
||||||
|
assert_match /ask not what your country can do for you, ask what you can do for your country/, segment.text
|
||||||
|
end
|
||||||
|
|
||||||
def test_full_get_segment_t0
|
def test_full_get_segment_t0
|
||||||
assert_equal 0, whisper.full_get_segment_t0(0)
|
assert_equal 0, whisper.full_get_segment_t0(0)
|
||||||
assert_raise IndexError do
|
assert_raise IndexError do
|
||||||
@ -70,6 +61,12 @@ class TestWhisper < TestBase
|
|||||||
def test_full_get_segment_text
|
def test_full_get_segment_text
|
||||||
assert_match /ask not what your country can do for you, ask what you can do for your country/, whisper.full_get_segment_text(0)
|
assert_match /ask not what your country can do for you, ask what you can do for your country/, whisper.full_get_segment_text(0)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def test_full_get_segment_no_speech_prob
|
||||||
|
prob = whisper.full_get_segment_no_speech_prob(0)
|
||||||
|
assert prob > 0.0
|
||||||
|
assert prob < 1.0
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def test_lang_max_id
|
def test_lang_max_id
|
||||||
|
@ -3,12 +3,12 @@ require_relative "extsources"
|
|||||||
Gem::Specification.new do |s|
|
Gem::Specification.new do |s|
|
||||||
s.name = "whispercpp"
|
s.name = "whispercpp"
|
||||||
s.authors = ["Georgi Gerganov", "Todd A. Fisher"]
|
s.authors = ["Georgi Gerganov", "Todd A. Fisher"]
|
||||||
s.version = '1.3.0'
|
s.version = '1.3.1'
|
||||||
s.date = '2024-05-14'
|
s.date = '2024-12-19'
|
||||||
s.description = %q{High-performance inference of OpenAI's Whisper automatic speech recognition (ASR) model via Ruby}
|
s.description = %q{High-performance inference of OpenAI's Whisper automatic speech recognition (ASR) model via Ruby}
|
||||||
s.email = 'todd.fisher@gmail.com'
|
s.email = 'todd.fisher@gmail.com'
|
||||||
s.extra_rdoc_files = ['LICENSE', 'README.md']
|
s.extra_rdoc_files = ['LICENSE', 'README.md']
|
||||||
|
|
||||||
s.files = `git ls-files . -z`.split("\x0") +
|
s.files = `git ls-files . -z`.split("\x0") +
|
||||||
EXTSOURCES.collect {|file|
|
EXTSOURCES.collect {|file|
|
||||||
basename = File.basename(file)
|
basename = File.basename(file)
|
||||||
@ -21,7 +21,7 @@ Gem::Specification.new do |s|
|
|||||||
|
|
||||||
s.summary = %q{Ruby whisper.cpp bindings}
|
s.summary = %q{Ruby whisper.cpp bindings}
|
||||||
s.test_files = s.files.select {|file| file.start_with? "tests/"}
|
s.test_files = s.files.select {|file| file.start_with? "tests/"}
|
||||||
|
|
||||||
s.extensions << 'ext/extconf.rb'
|
s.extensions << 'ext/extconf.rb'
|
||||||
s.required_ruby_version = '>= 3.1.0'
|
s.required_ruby_version = '>= 3.1.0'
|
||||||
|
|
||||||
@ -29,8 +29,8 @@ Gem::Specification.new do |s|
|
|||||||
s.homepage = 'https://github.com/ggerganov/whisper.cpp'
|
s.homepage = 'https://github.com/ggerganov/whisper.cpp'
|
||||||
s.rdoc_options = ['--main', 'README.md']
|
s.rdoc_options = ['--main', 'README.md']
|
||||||
|
|
||||||
|
|
||||||
s.platform = Gem::Platform::RUBY
|
s.platform = Gem::Platform::RUBY
|
||||||
|
|
||||||
s.licenses = ['MIT']
|
s.licenses = ['MIT']
|
||||||
end
|
end
|
||||||
|
519
build-xcframework.sh
Executable file
519
build-xcframework.sh
Executable file
@ -0,0 +1,519 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Options
|
||||||
|
IOS_MIN_OS_VERSION=16.4
|
||||||
|
MACOS_MIN_OS_VERSION=13.3
|
||||||
|
VISIONOS_MIN_OS_VERSION=1.0
|
||||||
|
TVOS_MIN_OS_VERSION=16.4
|
||||||
|
|
||||||
|
BUILD_SHARED_LIBS=OFF
|
||||||
|
WHISPER_BUILD_EXAMPLES=OFF
|
||||||
|
WHISPER_BUILD_TESTS=OFF
|
||||||
|
WHISPER_BUILD_SERVER=OFF
|
||||||
|
GGML_METAL=ON
|
||||||
|
GGML_METAL_EMBED_LIBRARY=ON
|
||||||
|
GGML_BLAS_DEFAULT=ON
|
||||||
|
GGML_METAL_USE_BF16=ON
|
||||||
|
GGML_OPENMP=OFF
|
||||||
|
|
||||||
|
COMMON_C_FLAGS="-Wno-macro-redefined -Wno-shorten-64-to-32 -Wno-unused-command-line-argument -g"
|
||||||
|
COMMON_CXX_FLAGS="-Wno-macro-redefined -Wno-shorten-64-to-32 -Wno-unused-command-line-argument -g"
|
||||||
|
|
||||||
|
# Common options for all builds
|
||||||
|
COMMON_CMAKE_ARGS=(
|
||||||
|
-DCMAKE_XCODE_ATTRIBUTE_CODE_SIGNING_REQUIRED=NO
|
||||||
|
-DCMAKE_XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY=""
|
||||||
|
-DCMAKE_XCODE_ATTRIBUTE_CODE_SIGNING_ALLOWED=NO
|
||||||
|
-DCMAKE_XCODE_ATTRIBUTE_DEBUG_INFORMATION_FORMAT="dwarf-with-dsym"
|
||||||
|
-DCMAKE_XCODE_ATTRIBUTE_GCC_GENERATE_DEBUGGING_SYMBOLS=YES
|
||||||
|
-DCMAKE_XCODE_ATTRIBUTE_COPY_PHASE_STRIP=NO
|
||||||
|
-DCMAKE_XCODE_ATTRIBUTE_STRIP_INSTALLED_PRODUCT=NO
|
||||||
|
-DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml
|
||||||
|
-DBUILD_SHARED_LIBS=${BUILD_SHARED_LIBS}
|
||||||
|
-DWHISPER_BUILD_EXAMPLES=${WHISPER_BUILD_EXAMPLES}
|
||||||
|
-DWHISPER_BUILD_TESTS=${WHISPER_BUILD_TESTS}
|
||||||
|
-DWHISPER_BUILD_SERVER=${WHISPER_BUILD_SERVER}
|
||||||
|
-DGGML_METAL_EMBED_LIBRARY=${GGML_METAL_EMBED_LIBRARY}
|
||||||
|
-DGGML_BLAS_DEFAULT=${GGML_BLAS_DEFAULT}
|
||||||
|
-DGGML_METAL=${GGML_METAL}
|
||||||
|
-DGGML_METAL_USE_BF16=${GGML_METAL_USE_BF16}
|
||||||
|
-DGGML_NATIVE=OFF
|
||||||
|
-DGGML_OPENMP=${GGML_OPENMP}
|
||||||
|
)
|
||||||
|
|
||||||
|
check_required_tool() {
|
||||||
|
local tool=$1
|
||||||
|
local install_message=$2
|
||||||
|
|
||||||
|
if ! command -v $tool &> /dev/null; then
|
||||||
|
echo "Error: $tool is required but not found."
|
||||||
|
echo "$install_message"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
echo "Checking for required tools..."
|
||||||
|
check_required_tool "cmake" "Please install CMake 3.28.0 or later (brew install cmake)"
|
||||||
|
check_required_tool "xcodebuild" "Please install Xcode and Xcode Command Line Tools (xcode-select --install)"
|
||||||
|
check_required_tool "libtool" "Please install libtool which should be available with Xcode Command Line Tools (CLT). Make sure Xcode CLT is installed (xcode-select --install)"
|
||||||
|
check_required_tool "dsymutil" "Please install Xcode and Xcode Command Line Tools (xcode-select --install)"
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
## Clean up previous builds
|
||||||
|
rm -rf build-apple
|
||||||
|
rm -rf build-ios-sim
|
||||||
|
rm -rf build-ios-device
|
||||||
|
rm -rf build-macos
|
||||||
|
rm -rf build-visionos
|
||||||
|
rm -rf build-visionos-sim
|
||||||
|
rm -rf build-tvos-sim
|
||||||
|
rm -rf build-tvos-device
|
||||||
|
|
||||||
|
# Setup the xcframework build directory structure
|
||||||
|
setup_framework_structure() {
|
||||||
|
local build_dir=$1
|
||||||
|
local min_os_version=$2
|
||||||
|
local platform=$3 # "ios", "macos", "visionos", or "tvos"
|
||||||
|
local framework_name="whisper"
|
||||||
|
|
||||||
|
echo "Creating ${platform}-style framework structure for ${build_dir}"
|
||||||
|
|
||||||
|
if [[ "$platform" == "macos" ]]; then
|
||||||
|
# macOS versioned structure uses versioned directories
|
||||||
|
mkdir -p ${build_dir}/framework/${framework_name}.framework/Versions/A/Headers
|
||||||
|
mkdir -p ${build_dir}/framework/${framework_name}.framework/Versions/A/Modules
|
||||||
|
mkdir -p ${build_dir}/framework/${framework_name}.framework/Versions/A/Resources
|
||||||
|
|
||||||
|
# Create symbolic links
|
||||||
|
ln -sf A ${build_dir}/framework/${framework_name}.framework/Versions/Current
|
||||||
|
ln -sf Versions/Current/Headers ${build_dir}/framework/${framework_name}.framework/Headers
|
||||||
|
ln -sf Versions/Current/Modules ${build_dir}/framework/${framework_name}.framework/Modules
|
||||||
|
ln -sf Versions/Current/Resources ${build_dir}/framework/${framework_name}.framework/Resources
|
||||||
|
ln -sf Versions/Current/${framework_name} ${build_dir}/framework/${framework_name}.framework/${framework_name}
|
||||||
|
|
||||||
|
# Set header and module paths
|
||||||
|
local header_path=${build_dir}/framework/${framework_name}.framework/Versions/A/Headers/
|
||||||
|
local module_path=${build_dir}/framework/${framework_name}.framework/Versions/A/Modules/
|
||||||
|
else
|
||||||
|
# iOS/VisionOS/tvOS use a flat structure
|
||||||
|
mkdir -p ${build_dir}/framework/${framework_name}.framework/Headers
|
||||||
|
mkdir -p ${build_dir}/framework/${framework_name}.framework/Modules
|
||||||
|
|
||||||
|
# Remove any existing structure to ensure clean build
|
||||||
|
rm -rf ${build_dir}/framework/${framework_name}.framework/Versions
|
||||||
|
|
||||||
|
# Set header and module paths
|
||||||
|
local header_path=${build_dir}/framework/${framework_name}.framework/Headers/
|
||||||
|
local module_path=${build_dir}/framework/${framework_name}.framework/Modules/
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Copy all required headers (common for all platforms)
|
||||||
|
cp include/whisper.h ${header_path}
|
||||||
|
cp ggml/include/ggml.h ${header_path}
|
||||||
|
cp ggml/include/ggml-alloc.h ${header_path}
|
||||||
|
cp ggml/include/ggml-backend.h ${header_path}
|
||||||
|
cp ggml/include/ggml-metal.h ${header_path}
|
||||||
|
cp ggml/include/ggml-cpu.h ${header_path}
|
||||||
|
cp ggml/include/ggml-blas.h ${header_path}
|
||||||
|
cp ggml/include/gguf.h ${header_path}
|
||||||
|
|
||||||
|
# Create module map (common for all platforms)
|
||||||
|
cat > ${module_path}module.modulemap << EOF
|
||||||
|
framework module whisper {
|
||||||
|
header "whisper.h"
|
||||||
|
header "ggml.h"
|
||||||
|
header "ggml-alloc.h"
|
||||||
|
header "ggml-backend.h"
|
||||||
|
header "ggml-metal.h"
|
||||||
|
header "ggml-cpu.h"
|
||||||
|
header "ggml-blas.h"
|
||||||
|
header "gguf.h"
|
||||||
|
|
||||||
|
link "c++"
|
||||||
|
link framework "Accelerate"
|
||||||
|
link framework "Metal"
|
||||||
|
link framework "Foundation"
|
||||||
|
|
||||||
|
export *
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Platform-specific settings for Info.plist
|
||||||
|
local platform_name=""
|
||||||
|
local sdk_name=""
|
||||||
|
local supported_platform=""
|
||||||
|
|
||||||
|
case "$platform" in
|
||||||
|
"ios")
|
||||||
|
platform_name="iphoneos"
|
||||||
|
sdk_name="iphoneos${min_os_version}"
|
||||||
|
supported_platform="iPhoneOS"
|
||||||
|
local plist_path="${build_dir}/framework/${framework_name}.framework/Info.plist"
|
||||||
|
local device_family=' <key>UIDeviceFamily</key>
|
||||||
|
<array>
|
||||||
|
<integer>1</integer>
|
||||||
|
<integer>2</integer>
|
||||||
|
</array>'
|
||||||
|
;;
|
||||||
|
"macos")
|
||||||
|
platform_name="macosx"
|
||||||
|
sdk_name="macosx${min_os_version}"
|
||||||
|
supported_platform="MacOSX"
|
||||||
|
local plist_path="${build_dir}/framework/${framework_name}.framework/Versions/A/Resources/Info.plist"
|
||||||
|
local device_family=""
|
||||||
|
;;
|
||||||
|
"visionos")
|
||||||
|
platform_name="xros"
|
||||||
|
sdk_name="xros${min_os_version}"
|
||||||
|
supported_platform="XRPlatform"
|
||||||
|
local plist_path="${build_dir}/framework/${framework_name}.framework/Info.plist"
|
||||||
|
local device_family=""
|
||||||
|
;;
|
||||||
|
"tvos")
|
||||||
|
platform_name="appletvos"
|
||||||
|
sdk_name="appletvos${min_os_version}"
|
||||||
|
supported_platform="AppleTVOS"
|
||||||
|
local plist_path="${build_dir}/framework/${framework_name}.framework/Info.plist"
|
||||||
|
local device_family=' <key>UIDeviceFamily</key>
|
||||||
|
<array>
|
||||||
|
<integer>3</integer>
|
||||||
|
</array>'
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# Create Info.plist
|
||||||
|
cat > ${plist_path} << EOF
|
||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||||
|
<plist version="1.0">
|
||||||
|
<dict>
|
||||||
|
<key>CFBundleDevelopmentRegion</key>
|
||||||
|
<string>en</string>
|
||||||
|
<key>CFBundleExecutable</key>
|
||||||
|
<string>whisper</string>
|
||||||
|
<key>CFBundleIdentifier</key>
|
||||||
|
<string>org.ggml.whisper</string>
|
||||||
|
<key>CFBundleInfoDictionaryVersion</key>
|
||||||
|
<string>6.0</string>
|
||||||
|
<key>CFBundleName</key>
|
||||||
|
<string>whisper</string>
|
||||||
|
<key>CFBundlePackageType</key>
|
||||||
|
<string>FMWK</string>
|
||||||
|
<key>CFBundleShortVersionString</key>
|
||||||
|
<string>1.0</string>
|
||||||
|
<key>CFBundleVersion</key>
|
||||||
|
<string>1</string>
|
||||||
|
<key>MinimumOSVersion</key>
|
||||||
|
<string>${min_os_version}</string>
|
||||||
|
<key>CFBundleSupportedPlatforms</key>
|
||||||
|
<array>
|
||||||
|
<string>${supported_platform}</string>
|
||||||
|
</array>${device_family}
|
||||||
|
<key>DTPlatformName</key>
|
||||||
|
<string>${platform_name}</string>
|
||||||
|
<key>DTSDKName</key>
|
||||||
|
<string>${sdk_name}</string>
|
||||||
|
</dict>
|
||||||
|
</plist>
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
# Create dynamic libraries from static libraries.
|
||||||
|
combine_static_libraries() {
|
||||||
|
local build_dir="$1"
|
||||||
|
local release_dir="$2"
|
||||||
|
local platform="$3" # "ios", "macos", "visionos", or "tvos"
|
||||||
|
local is_simulator="$4"
|
||||||
|
local base_dir="$(pwd)"
|
||||||
|
local framework_name="whisper"
|
||||||
|
|
||||||
|
# Determine output path based on platform
|
||||||
|
local output_lib=""
|
||||||
|
if [[ "$platform" == "macos" ]]; then
|
||||||
|
# macOS uses versioned structure
|
||||||
|
output_lib="${build_dir}/framework/${framework_name}.framework/Versions/A/${framework_name}"
|
||||||
|
else
|
||||||
|
# iOS, visionOS, and tvOS use a directory flat structure
|
||||||
|
output_lib="${build_dir}/framework/${framework_name}.framework/${framework_name}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
local libs=(
|
||||||
|
"${base_dir}/${build_dir}/src/${release_dir}/libwhisper.a"
|
||||||
|
"${base_dir}/${build_dir}/ggml/src/${release_dir}/libggml.a"
|
||||||
|
"${base_dir}/${build_dir}/ggml/src/${release_dir}/libggml-base.a"
|
||||||
|
"${base_dir}/${build_dir}/ggml/src/${release_dir}/libggml-cpu.a"
|
||||||
|
"${base_dir}/${build_dir}/ggml/src/ggml-metal/${release_dir}/libggml-metal.a"
|
||||||
|
"${base_dir}/${build_dir}/ggml/src/ggml-blas/${release_dir}/libggml-blas.a"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Create temporary directory for processing
|
||||||
|
local temp_dir="${base_dir}/${build_dir}/temp"
|
||||||
|
mkdir -p "${temp_dir}"
|
||||||
|
|
||||||
|
# Since we have multiple architectures libtool will find object files that do not
|
||||||
|
# match the target architecture. We suppress these warnings.
|
||||||
|
libtool -static -o "${temp_dir}/combined.a" "${libs[@]}" 2> /dev/null
|
||||||
|
|
||||||
|
# Determine SDK, architectures, and install_name based on platform and simulator flag.
|
||||||
|
local sdk=""
|
||||||
|
local archs=""
|
||||||
|
local min_version_flag=""
|
||||||
|
local install_name=""
|
||||||
|
|
||||||
|
case "$platform" in
|
||||||
|
"ios")
|
||||||
|
if [[ "$is_simulator" == "true" ]]; then
|
||||||
|
sdk="iphonesimulator"
|
||||||
|
archs="arm64 x86_64"
|
||||||
|
min_version_flag="-mios-simulator-version-min=${IOS_MIN_OS_VERSION}"
|
||||||
|
else
|
||||||
|
sdk="iphoneos"
|
||||||
|
archs="arm64"
|
||||||
|
min_version_flag="-mios-version-min=${IOS_MIN_OS_VERSION}"
|
||||||
|
fi
|
||||||
|
install_name="@rpath/whisper.framework/whisper"
|
||||||
|
;;
|
||||||
|
"macos")
|
||||||
|
sdk="macosx"
|
||||||
|
archs="arm64 x86_64"
|
||||||
|
min_version_flag="-mmacosx-version-min=${MACOS_MIN_OS_VERSION}"
|
||||||
|
install_name="@rpath/whisper.framework/Versions/Current/whisper"
|
||||||
|
;;
|
||||||
|
"visionos")
|
||||||
|
if [[ "$is_simulator" == "true" ]]; then
|
||||||
|
sdk="xrsimulator"
|
||||||
|
archs="arm64 x86_64"
|
||||||
|
min_version_flag="-mtargetos=xros${VISIONOS_MIN_OS_VERSION}-simulator"
|
||||||
|
else
|
||||||
|
sdk="xros"
|
||||||
|
archs="arm64"
|
||||||
|
min_version_flag="-mtargetos=xros${VISIONOS_MIN_OS_VERSION}"
|
||||||
|
fi
|
||||||
|
# Use flat structure for visionOS, same as iOS
|
||||||
|
install_name="@rpath/whisper.framework/whisper"
|
||||||
|
;;
|
||||||
|
"tvos")
|
||||||
|
if [[ "$is_simulator" == "true" ]]; then
|
||||||
|
sdk="appletvsimulator"
|
||||||
|
archs="arm64 x86_64"
|
||||||
|
min_version_flag="-mtvos-simulator-version-min=${TVOS_MIN_OS_VERSION}"
|
||||||
|
else
|
||||||
|
sdk="appletvos"
|
||||||
|
archs="arm64"
|
||||||
|
min_version_flag="-mtvos-version-min=${TVOS_MIN_OS_VERSION}"
|
||||||
|
fi
|
||||||
|
install_name="@rpath/whisper.framework/whisper"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# Build architecture flags
|
||||||
|
local arch_flags=""
|
||||||
|
for arch in $archs; do
|
||||||
|
arch_flags+=" -arch $arch"
|
||||||
|
done
|
||||||
|
|
||||||
|
# Create dynamic library
|
||||||
|
echo "Creating dynamic library for ${platform}."
|
||||||
|
xcrun -sdk $sdk clang++ -dynamiclib \
|
||||||
|
-isysroot $(xcrun --sdk $sdk --show-sdk-path) \
|
||||||
|
$arch_flags \
|
||||||
|
$min_version_flag \
|
||||||
|
-Wl,-force_load,"${temp_dir}/combined.a" \
|
||||||
|
-framework Foundation -framework Metal -framework Accelerate \
|
||||||
|
-install_name "$install_name" \
|
||||||
|
-o "${base_dir}/${output_lib}"
|
||||||
|
|
||||||
|
# Platform-specific post-processing for device builds
|
||||||
|
if [[ "$is_simulator" == "false" ]]; then
|
||||||
|
if command -v vtool &>/dev/null; then
|
||||||
|
case "$platform" in
|
||||||
|
"ios")
|
||||||
|
echo "Marking binary as a framework binary for iOS..."
|
||||||
|
vtool -set-build-version ios ${IOS_MIN_OS_VERSION} ${IOS_MIN_OS_VERSION} -replace \
|
||||||
|
-output "${base_dir}/${output_lib}" "${base_dir}/${output_lib}"
|
||||||
|
;;
|
||||||
|
"visionos")
|
||||||
|
echo "Marking binary as a framework binary for visionOS..."
|
||||||
|
vtool -set-build-version xros ${VISIONOS_MIN_OS_VERSION} ${VISIONOS_MIN_OS_VERSION} -replace \
|
||||||
|
-output "${base_dir}/${output_lib}" "${base_dir}/${output_lib}"
|
||||||
|
;;
|
||||||
|
"tvos")
|
||||||
|
echo "Marking binary as a framework binary for tvOS..."
|
||||||
|
vtool -set-build-version tvos ${TVOS_MIN_OS_VERSION} ${TVOS_MIN_OS_VERSION} -replace \
|
||||||
|
-output "${base_dir}/${output_lib}" "${base_dir}/${output_lib}"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
else
|
||||||
|
echo "Warning: vtool not found. Binary may not pass App Store validation."
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Creating properly formatted dSYM..."
|
||||||
|
# Create a separate directory for dSYMs for all platforms
|
||||||
|
mkdir -p "${base_dir}/${build_dir}/dSYMs"
|
||||||
|
|
||||||
|
# iOS and visionOS style dSYM (flat structure)
|
||||||
|
if [[ "$platform" == "ios" || "$platform" == "visionos" || "$platform" == "tvos" ]]; then
|
||||||
|
# Generate dSYM in the dSYMs directory
|
||||||
|
xcrun dsymutil "${base_dir}/${output_lib}" -o "${base_dir}/${build_dir}/dSYMs/whisper.dSYM"
|
||||||
|
|
||||||
|
# Create a copy of the binary that will be stripped
|
||||||
|
cp "${base_dir}/${output_lib}" "${temp_dir}/binary_to_strip"
|
||||||
|
|
||||||
|
# Strip debug symbols from the copy
|
||||||
|
xcrun strip -S "${temp_dir}/binary_to_strip" -o "${temp_dir}/stripped_lib"
|
||||||
|
|
||||||
|
# Replace the original with the stripped version
|
||||||
|
mv "${temp_dir}/stripped_lib" "${base_dir}/${output_lib}"
|
||||||
|
else
|
||||||
|
# macOS style dSYM
|
||||||
|
# First strip debug info to a separate file
|
||||||
|
xcrun strip -S "${base_dir}/${output_lib}" -o "${temp_dir}/stripped_lib"
|
||||||
|
|
||||||
|
# Generate dSYM in the dSYMs directory
|
||||||
|
xcrun dsymutil "${base_dir}/${output_lib}" -o "${base_dir}/${build_dir}/dSYMs/whisper.dSYM"
|
||||||
|
|
||||||
|
# Replace original binary with stripped version
|
||||||
|
mv "${temp_dir}/stripped_lib" "${base_dir}/${output_lib}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Remove any automatically generated dSYM files in the framework structure as they will
|
||||||
|
# otherwise case Invalid Bundle Structure validation errors.
|
||||||
|
if [ -d "${base_dir}/${output_lib}.dSYM" ]; then
|
||||||
|
echo "Removing generated dSYM file in framework structure: ${base_dir}/${output_lib}.dSYM"
|
||||||
|
rm -rf "${base_dir}/${output_lib}.dSYM"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Clean up
|
||||||
|
rm -rf "${temp_dir}"
|
||||||
|
}
|
||||||
|
|
||||||
|
echo "Building for iOS simulator..."
|
||||||
|
cmake -B build-ios-sim -G Xcode \
|
||||||
|
"${COMMON_CMAKE_ARGS[@]}" \
|
||||||
|
-DCMAKE_OSX_DEPLOYMENT_TARGET=${IOS_MIN_OS_VERSION} \
|
||||||
|
-DIOS=ON \
|
||||||
|
-DCMAKE_SYSTEM_NAME=iOS \
|
||||||
|
-DCMAKE_OSX_SYSROOT=iphonesimulator \
|
||||||
|
-DCMAKE_OSX_ARCHITECTURES="arm64;x86_64" \
|
||||||
|
-DCMAKE_XCODE_ATTRIBUTE_SUPPORTED_PLATFORMS=iphonesimulator \
|
||||||
|
-DCMAKE_C_FLAGS="${COMMON_C_FLAGS}" \
|
||||||
|
-DCMAKE_CXX_FLAGS="${COMMON_CXX_FLAGS}" \
|
||||||
|
-S .
|
||||||
|
cmake --build build-ios-sim --config Release -- -quiet
|
||||||
|
|
||||||
|
echo "Building for iOS devices..."
|
||||||
|
cmake -B build-ios-device -G Xcode \
|
||||||
|
"${COMMON_CMAKE_ARGS[@]}" \
|
||||||
|
-DCMAKE_OSX_DEPLOYMENT_TARGET=${IOS_MIN_OS_VERSION} \
|
||||||
|
-DCMAKE_OSX_SYSROOT=iphoneos \
|
||||||
|
-DCMAKE_OSX_ARCHITECTURES="arm64" \
|
||||||
|
-DCMAKE_XCODE_ATTRIBUTE_SUPPORTED_PLATFORMS=iphoneos \
|
||||||
|
-DCMAKE_C_FLAGS="${COMMON_C_FLAGS}" \
|
||||||
|
-DCMAKE_CXX_FLAGS="${COMMON_CXX_FLAGS}" \
|
||||||
|
-S .
|
||||||
|
cmake --build build-ios-device --config Release -- -quiet
|
||||||
|
|
||||||
|
echo "Building for macOS..."
|
||||||
|
cmake -B build-macos -G Xcode \
|
||||||
|
"${COMMON_CMAKE_ARGS[@]}" \
|
||||||
|
-DCMAKE_OSX_DEPLOYMENT_TARGET=${MACOS_MIN_OS_VERSION} \
|
||||||
|
-DCMAKE_OSX_ARCHITECTURES="arm64;x86_64" \
|
||||||
|
-DCMAKE_C_FLAGS="${COMMON_C_FLAGS}" \
|
||||||
|
-DCMAKE_CXX_FLAGS="${COMMON_CXX_FLAGS}" \
|
||||||
|
-S .
|
||||||
|
cmake --build build-macos --config Release -- -quiet
|
||||||
|
|
||||||
|
echo "Building for visionOS..."
|
||||||
|
cmake -B build-visionos -G Xcode \
|
||||||
|
"${COMMON_CMAKE_ARGS[@]}" \
|
||||||
|
-DCMAKE_OSX_DEPLOYMENT_TARGET=${VISIONOS_MIN_OS_VERSION} \
|
||||||
|
-DCMAKE_OSX_ARCHITECTURES="arm64" \
|
||||||
|
-DCMAKE_SYSTEM_NAME=visionOS \
|
||||||
|
-DCMAKE_OSX_SYSROOT=xros \
|
||||||
|
-DCMAKE_XCODE_ATTRIBUTE_SUPPORTED_PLATFORMS=xros \
|
||||||
|
-DCMAKE_C_FLAGS="-D_XOPEN_SOURCE=700 -Du_int=unsigned\ int -Du_char=unsigned\ char -Du_short=unsigned\ short ${COMMON_C_FLAGS}" \
|
||||||
|
-DCMAKE_CXX_FLAGS="-D_XOPEN_SOURCE=700 -Du_int=unsigned\ int -Du_char=unsigned\ char -Du_short=unsigned\ short ${COMMON_CXX_FLAGS}" \
|
||||||
|
-S .
|
||||||
|
cmake --build build-visionos --config Release -- -quiet
|
||||||
|
|
||||||
|
echo "Building for visionOS simulator..."
|
||||||
|
cmake -B build-visionos-sim -G Xcode \
|
||||||
|
"${COMMON_CMAKE_ARGS[@]}" \
|
||||||
|
-DCMAKE_OSX_DEPLOYMENT_TARGET=${VISIONOS_MIN_OS_VERSION} \
|
||||||
|
-DCMAKE_OSX_ARCHITECTURES="arm64;x86_64" \
|
||||||
|
-DCMAKE_SYSTEM_NAME=visionOS \
|
||||||
|
-DCMAKE_OSX_SYSROOT=xrsimulator \
|
||||||
|
-DCMAKE_XCODE_ATTRIBUTE_SUPPORTED_PLATFORMS=xrsimulator \
|
||||||
|
-DCMAKE_C_FLAGS="-D_XOPEN_SOURCE=700 -Du_int=unsigned\ int -Du_char=unsigned\ char -Du_short=unsigned\ short ${COMMON_C_FLAGS}" \
|
||||||
|
-DCMAKE_CXX_FLAGS="-D_XOPEN_SOURCE=700 -Du_int=unsigned\ int -Du_char=unsigned\ char -Du_short=unsigned\ short ${COMMON_CXX_FLAGS}" \
|
||||||
|
-S .
|
||||||
|
cmake --build build-visionos-sim --config Release -- -quiet
|
||||||
|
|
||||||
|
# Add tvOS builds (might need the same u_int definitions as watchOS and visionOS)
|
||||||
|
echo "Building for tvOS simulator..."
|
||||||
|
cmake -B build-tvos-sim -G Xcode \
|
||||||
|
"${COMMON_CMAKE_ARGS[@]}" \
|
||||||
|
-DCMAKE_OSX_DEPLOYMENT_TARGET=${TVOS_MIN_OS_VERSION} \
|
||||||
|
-DCMAKE_SYSTEM_NAME=tvOS \
|
||||||
|
-DCMAKE_OSX_SYSROOT=appletvsimulator \
|
||||||
|
-DCMAKE_OSX_ARCHITECTURES="arm64;x86_64" \
|
||||||
|
-DGGML_METAL=ON \
|
||||||
|
-DCMAKE_XCODE_ATTRIBUTE_SUPPORTED_PLATFORMS=appletvsimulator \
|
||||||
|
-DCMAKE_C_FLAGS="${COMMON_C_FLAGS}" \
|
||||||
|
-DCMAKE_CXX_FLAGS="${COMMON_CXX_FLAGS}" \
|
||||||
|
-S .
|
||||||
|
cmake --build build-tvos-sim --config Release -- -quiet
|
||||||
|
|
||||||
|
echo "Building for tvOS devices..."
|
||||||
|
cmake -B build-tvos-device -G Xcode \
|
||||||
|
"${COMMON_CMAKE_ARGS[@]}" \
|
||||||
|
-DCMAKE_OSX_DEPLOYMENT_TARGET=${TVOS_MIN_OS_VERSION} \
|
||||||
|
-DCMAKE_SYSTEM_NAME=tvOS \
|
||||||
|
-DCMAKE_OSX_SYSROOT=appletvos \
|
||||||
|
-DCMAKE_OSX_ARCHITECTURES="arm64" \
|
||||||
|
-DGGML_METAL=ON \
|
||||||
|
-DCMAKE_XCODE_ATTRIBUTE_SUPPORTED_PLATFORMS=appletvos \
|
||||||
|
-DCMAKE_C_FLAGS="${COMMON_C_FLAGS}" \
|
||||||
|
-DCMAKE_CXX_FLAGS="${COMMON_CXX_FLAGS}" \
|
||||||
|
-S .
|
||||||
|
cmake --build build-tvos-device --config Release -- -quiet
|
||||||
|
|
||||||
|
# Setup frameworks and copy binaries and headers
|
||||||
|
echo "Setting up framework structures..."
|
||||||
|
setup_framework_structure "build-ios-sim" ${IOS_MIN_OS_VERSION} "ios"
|
||||||
|
setup_framework_structure "build-ios-device" ${IOS_MIN_OS_VERSION} "ios"
|
||||||
|
setup_framework_structure "build-macos" ${MACOS_MIN_OS_VERSION} "macos"
|
||||||
|
setup_framework_structure "build-visionos" ${VISIONOS_MIN_OS_VERSION} "visionos"
|
||||||
|
setup_framework_structure "build-visionos-sim" ${VISIONOS_MIN_OS_VERSION} "visionos"
|
||||||
|
setup_framework_structure "build-tvos-sim" ${TVOS_MIN_OS_VERSION} "tvos"
|
||||||
|
setup_framework_structure "build-tvos-device" ${TVOS_MIN_OS_VERSION} "tvos"
|
||||||
|
|
||||||
|
# Create dynamic libraries from static libraries
|
||||||
|
echo "Creating dynamic libraries from static libraries..."
|
||||||
|
combine_static_libraries "build-ios-sim" "Release-iphonesimulator" "ios" "true"
|
||||||
|
combine_static_libraries "build-ios-device" "Release-iphoneos" "ios" "false"
|
||||||
|
combine_static_libraries "build-macos" "Release" "macos" "false"
|
||||||
|
combine_static_libraries "build-visionos" "Release-xros" "visionos" "false"
|
||||||
|
combine_static_libraries "build-visionos-sim" "Release-xrsimulator" "visionos" "true"
|
||||||
|
combine_static_libraries "build-tvos-sim" "Release-appletvsimulator" "tvos" "true"
|
||||||
|
combine_static_libraries "build-tvos-device" "Release-appletvos" "tvos" "false"
|
||||||
|
|
||||||
|
# Create XCFramework with correct debug symbols paths
|
||||||
|
echo "Creating XCFramework..."
|
||||||
|
xcodebuild -create-xcframework \
|
||||||
|
-framework $(pwd)/build-ios-sim/framework/whisper.framework \
|
||||||
|
-debug-symbols $(pwd)/build-ios-sim/dSYMs/whisper.dSYM \
|
||||||
|
-framework $(pwd)/build-ios-device/framework/whisper.framework \
|
||||||
|
-debug-symbols $(pwd)/build-ios-device/dSYMs/whisper.dSYM \
|
||||||
|
-framework $(pwd)/build-macos/framework/whisper.framework \
|
||||||
|
-debug-symbols $(pwd)/build-macos/dSYMS/whisper.dSYM \
|
||||||
|
-framework $(pwd)/build-visionos/framework/whisper.framework \
|
||||||
|
-debug-symbols $(pwd)/build-visionos/dSYMs/whisper.dSYM \
|
||||||
|
-framework $(pwd)/build-visionos-sim/framework/whisper.framework \
|
||||||
|
-debug-symbols $(pwd)/build-visionos-sim/dSYMs/whisper.dSYM \
|
||||||
|
-framework $(pwd)/build-tvos-device/framework/whisper.framework \
|
||||||
|
-debug-symbols $(pwd)/build-tvos-device/dSYMs/whisper.dSYM \
|
||||||
|
-framework $(pwd)/build-tvos-sim/framework/whisper.framework \
|
||||||
|
-debug-symbols $(pwd)/build-tvos-sim/dSYMs/whisper.dSYM \
|
||||||
|
-output $(pwd)/build-apple/whisper.xcframework
|
41
ci/README.md
Normal file
41
ci/README.md
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
# CI
|
||||||
|
|
||||||
|
In addition to [Github Actions](https://github.com/ggerganov/whisper.cpp/actions) `whisper.cpp` uses a custom CI framework:
|
||||||
|
|
||||||
|
https://github.com/ggml-org/ci
|
||||||
|
|
||||||
|
It monitors the `master` branch for new commits and runs the
|
||||||
|
[ci/run.sh](https://github.com/ggerganov/whisper.cpp/blob/master/ci/run.sh) script on dedicated cloud instances. This allows us
|
||||||
|
to execute heavier workloads compared to just using Github Actions. Also with time, the cloud instances will be scaled
|
||||||
|
to cover various hardware architectures, including GPU and Apple Silicon instances.
|
||||||
|
|
||||||
|
Collaborators can optionally trigger the CI run by adding the `ggml-ci` keyword to their commit message.
|
||||||
|
Only the branches of this repo are monitored for this keyword.
|
||||||
|
|
||||||
|
It is a good practice, before publishing changes to execute the full CI locally on your machine:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
mkdir tmp
|
||||||
|
|
||||||
|
# CPU-only build
|
||||||
|
bash ./ci/run.sh ./tmp/results ./tmp/mnt
|
||||||
|
|
||||||
|
# with CUDA support
|
||||||
|
GG_BUILD_CUDA=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
|
||||||
|
```
|
||||||
|
|
||||||
|
## Environment Variables
|
||||||
|
|
||||||
|
The CI script supports several environment variables to control the build:
|
||||||
|
|
||||||
|
| Variable | Description |
|
||||||
|
|----------|-------------|
|
||||||
|
| `GG_BUILD_CUDA` | Enable NVIDIA CUDA GPU acceleration |
|
||||||
|
| `GG_BUILD_SYCL` | Enable Intel SYCL acceleration |
|
||||||
|
| `GG_BUILD_VULKAN` | Enable Vulkan GPU acceleration |
|
||||||
|
| `GG_BUILD_METAL` | Enable Metal acceleration on Apple Silicon |
|
||||||
|
| `GG_BUILD_BLAS` | Enable BLAS CPU acceleration |
|
||||||
|
| `GG_BUILD_OPENVINO` | Enable OpenVINO support |
|
||||||
|
| `GG_BUILD_COREML` | Enable Core ML support for Apple Neural Engine |
|
||||||
|
| `GG_BUILD_LOW_PERF` | Limit tests for low-performance hardware |
|
||||||
|
| `GG_BUILD_TEST_MODELS` | Comma-separated list of models to test (e.g. "tiny.en,tiny,base,medium", defaults to all models unless `GG_BUILD_LOW_PERF` is set) |
|
333
ci/run.sh
Normal file
333
ci/run.sh
Normal file
@ -0,0 +1,333 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# sample usage:
|
||||||
|
#
|
||||||
|
# mkdir tmp
|
||||||
|
#
|
||||||
|
# # CPU-only build
|
||||||
|
# bash ./ci/run.sh ./tmp/results ./tmp/mnt
|
||||||
|
#
|
||||||
|
# # with CUDA support
|
||||||
|
# GG_BUILD_CUDA=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
|
||||||
|
#
|
||||||
|
|
||||||
|
if [ -z "$2" ]; then
|
||||||
|
echo "usage: $0 <output-dir> <mnt-dir>"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
mkdir -p "$1"
|
||||||
|
mkdir -p "$2"
|
||||||
|
|
||||||
|
OUT=$(realpath "$1")
|
||||||
|
MNT=$(realpath "$2")
|
||||||
|
|
||||||
|
rm -f "$OUT/*.log"
|
||||||
|
rm -f "$OUT/*.exit"
|
||||||
|
rm -f "$OUT/*.md"
|
||||||
|
|
||||||
|
sd=`dirname $0`
|
||||||
|
cd $sd/../
|
||||||
|
SRC=`pwd`
|
||||||
|
|
||||||
|
ALL_MODELS=( "tiny.en" "tiny" "base.en" "base" "small.en" "small" "medium.en" "medium" "large-v1" "large-v2" "large-v3" "large-v3-turbo" )
|
||||||
|
BENCH_N_THREADS=4
|
||||||
|
BENCH_ENCODER_ONLY=0
|
||||||
|
BENCH_FLASH_ATTN=0
|
||||||
|
|
||||||
|
# check for user-specified models first. if not specified, use fast models
|
||||||
|
if [ ! -z ${GG_BUILD_TEST_MODELS} ]; then
|
||||||
|
IFS=',' read -r -a MODELS <<< "${GG_BUILD_TEST_MODELS}"
|
||||||
|
else
|
||||||
|
if [ ! -z ${GG_BUILD_LOW_PERF} ]; then
|
||||||
|
MODELS=( "tiny" "base" "small" )
|
||||||
|
else
|
||||||
|
MODELS=("${ALL_MODELS[@]}")
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
CMAKE_EXTRA="-DWHISPER_FATAL_WARNINGS=ON"
|
||||||
|
|
||||||
|
if [ ! -z ${GG_BUILD_CUDA} ]; then
|
||||||
|
CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_CUDA=ON -DCMAKE_CUDA_ARCHITECTURES=native"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ! -z ${GG_BUILD_SYCL} ]; then
|
||||||
|
if [ -z ${ONEAPI_ROOT} ]; then
|
||||||
|
echo "Not detected ONEAPI_ROOT, please install oneAPI base toolkit and enable it by:"
|
||||||
|
echo "source /opt/intel/oneapi/setvars.sh"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ! -z ${GG_BUILD_OPENVINO} ]; then
|
||||||
|
CMAKE_EXTRA="${CMAKE_EXTRA} -DWHISPER_OPENVINO=ON"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ! -z ${GG_BUILD_METAL} ]; then
|
||||||
|
CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_METAL=ON"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ! -z ${GG_BUILD_VULKAN} ]; then
|
||||||
|
CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_VULKAN=ON"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ! -z ${GG_BUILD_BLAS} ]; then
|
||||||
|
CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_BLAS=ON"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ! -z ${GG_BUILD_COREML} ]; then
|
||||||
|
CMAKE_EXTRA="${CMAKE_EXTRA} -DWHISPER_COREML=ON"
|
||||||
|
fi
|
||||||
|
|
||||||
|
## helpers
|
||||||
|
|
||||||
|
# download a file if it does not exist or if it is outdated
|
||||||
|
function gg_wget {
|
||||||
|
local out=$1
|
||||||
|
local url=$2
|
||||||
|
|
||||||
|
local cwd=`pwd`
|
||||||
|
|
||||||
|
mkdir -p $out
|
||||||
|
cd $out
|
||||||
|
|
||||||
|
# should not re-download if file is the same
|
||||||
|
wget -nv -N $url
|
||||||
|
|
||||||
|
cd $cwd
|
||||||
|
}
|
||||||
|
|
||||||
|
function gg_download_model {
|
||||||
|
local model_name=$1
|
||||||
|
local model_file="$MNT/models/ggml-${model_name}.bin"
|
||||||
|
|
||||||
|
if [ ! -f ${model_file} ]; then
|
||||||
|
local cwd=`pwd`
|
||||||
|
mkdir -p "$MNT/models"
|
||||||
|
cd "$MNT/models"
|
||||||
|
bash "$cwd/models/download-ggml-model.sh" ${model_name} .
|
||||||
|
cd "$cwd"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function gg_printf {
|
||||||
|
printf -- "$@" >> $OUT/README.md
|
||||||
|
}
|
||||||
|
|
||||||
|
# Helper function to check command exit status
|
||||||
|
function gg_check_last_command_status {
|
||||||
|
local exit_file=$1
|
||||||
|
local command_name=$2
|
||||||
|
|
||||||
|
local exit_status=$?
|
||||||
|
echo "$exit_status" > "$exit_file"
|
||||||
|
|
||||||
|
if [ $exit_status -ne 0 ]; then
|
||||||
|
echo "Error: Command $command_name failed with exit status $exit_status"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
# Usage: gg_run <test_name> [additional_args...]
|
||||||
|
#
|
||||||
|
# Parameters:
|
||||||
|
# test_name - Name of the test to run (calls gg_run_<test_name>)
|
||||||
|
# additional_args - Any additional arguments to pass to the test function (first argument is appended to the log filename)
|
||||||
|
function gg_run {
|
||||||
|
ci=$1
|
||||||
|
|
||||||
|
if [ $# -gt 1 ]; then
|
||||||
|
ci="${ci}_${2}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
set -o pipefail
|
||||||
|
set -x
|
||||||
|
|
||||||
|
gg_run_$1 "$@" | tee $OUT/$ci.log
|
||||||
|
cur=$?
|
||||||
|
echo "$cur" > $OUT/$ci.exit
|
||||||
|
|
||||||
|
set +x
|
||||||
|
set +o pipefail
|
||||||
|
|
||||||
|
gg_sum_$1 "$@"
|
||||||
|
|
||||||
|
ret=$((ret | cur))
|
||||||
|
}
|
||||||
|
|
||||||
|
function gg_check_build_requirements {
|
||||||
|
if ! command -v cmake &> /dev/null; then
|
||||||
|
gg_printf 'cmake not found, please install'
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! command -v make &> /dev/null; then
|
||||||
|
gg_printf 'make not found, please install'
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
## ci
|
||||||
|
|
||||||
|
function gg_run_ctest {
|
||||||
|
mode=$2
|
||||||
|
|
||||||
|
cd ${SRC}
|
||||||
|
|
||||||
|
rm -rf build-ci-${mode} && mkdir build-ci-${mode} && cd build-ci-${mode}
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
gg_check_build_requirements
|
||||||
|
|
||||||
|
(time cmake -DCMAKE_BUILD_TYPE=${mode} ${CMAKE_EXTRA} .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
|
||||||
|
(time make -j$(nproc) ) 2>&1 | tee -a $OUT/${ci}-make.log
|
||||||
|
|
||||||
|
(time ctest --output-on-failure -L main -E test-opt ) 2>&1 | tee -a $OUT/${ci}-ctest.log
|
||||||
|
|
||||||
|
set +e
|
||||||
|
}
|
||||||
|
|
||||||
|
function gg_sum_ctest {
|
||||||
|
mode=$2
|
||||||
|
|
||||||
|
gg_printf '### %s\n\n' "${ci}"
|
||||||
|
|
||||||
|
gg_printf 'Runs ctest in '${mode}' mode\n'
|
||||||
|
gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)"
|
||||||
|
gg_printf '```\n'
|
||||||
|
gg_printf '%s\n' "$(cat $OUT/${ci}-ctest.log)"
|
||||||
|
gg_printf '```\n'
|
||||||
|
}
|
||||||
|
|
||||||
|
function gg_run_bench {
|
||||||
|
cd ${SRC}
|
||||||
|
|
||||||
|
# set flash attention flag if enabled
|
||||||
|
fattn=""
|
||||||
|
if [ "$BENCH_FLASH_ATTN" -eq 1 ]; then
|
||||||
|
fattn="-fa"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# run memcpy benchmark if not encoder-only mode
|
||||||
|
if [ "$BENCH_ENCODER_ONLY" -eq 0 ]; then
|
||||||
|
echo "Running memcpy benchmark"
|
||||||
|
(time ./build-ci-release/bin/whisper-bench -w 1 -t $BENCH_N_THREADS 2>&1) | tee -a $OUT/${ci}-memcpy.log
|
||||||
|
gg_check_last_command_status "$OUT/${ci}-memcpy.exit" "memcpy benchmark"
|
||||||
|
|
||||||
|
echo "Running ggml_mul_mat benchmark with $BENCH_N_THREADS threads"
|
||||||
|
(time ./build-ci-release/bin/whisper-bench -w 2 -t $BENCH_N_THREADS 2>&1) | tee -a $OUT/${ci}-mul_mat.log
|
||||||
|
gg_check_last_command_status "$OUT/${ci}-mul_mat.exit" "ggml_mul_mat benchmark"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Running benchmark for all models"
|
||||||
|
|
||||||
|
# generate header for the benchmark table
|
||||||
|
{
|
||||||
|
printf "| %16s | %13s | %3s | %3s | %7s | %7s | %7s | %7s | %7s |\n" "Config" "Model" "Th" "FA" "Enc." "Dec." "Bch5" "PP" "Commit"
|
||||||
|
printf "| %16s | %13s | %3s | %3s | %7s | %7s | %7s | %7s | %7s |\n" "---" "---" "---" "---" "---" "---" "---" "---" "---"
|
||||||
|
} | tee -a $OUT/${ci}-models-table.log
|
||||||
|
|
||||||
|
# run benchmark for each model
|
||||||
|
for model in "${MODELS[@]}"; do
|
||||||
|
echo "Benchmarking model: $model"
|
||||||
|
|
||||||
|
# run the benchmark and capture output
|
||||||
|
output=$(./build-ci-release/bin/whisper-bench -m $MNT/models/ggml-$model.bin -t $BENCH_N_THREADS $fattn 2>&1)
|
||||||
|
ret=$?
|
||||||
|
|
||||||
|
# save the raw output
|
||||||
|
echo "$output" > $OUT/${ci}-bench-$model.log
|
||||||
|
|
||||||
|
if [ $ret -eq 0 ]; then
|
||||||
|
# parse the benchmark results
|
||||||
|
encode_time=$(echo "$output" | grep "encode time" | awk '{print $11}')
|
||||||
|
decode_time=$(echo "$output" | grep "decode time" | awk '{print $11}')
|
||||||
|
batchd_time=$(echo "$output" | grep "batchd time" | awk '{print $11}')
|
||||||
|
prompt_time=$(echo "$output" | grep "prompt time" | awk '{print $11}')
|
||||||
|
system_info=$(echo "$output" | grep "system_info")
|
||||||
|
actual_threads=$(echo "$output" | grep "system_info" | awk '{print $4}')
|
||||||
|
|
||||||
|
# determine configuration
|
||||||
|
config=""
|
||||||
|
if [[ $system_info == *"AVX2 = 1"* ]]; then
|
||||||
|
config="$config AVX2"
|
||||||
|
fi
|
||||||
|
if [[ $system_info == *"NEON = 1"* ]]; then
|
||||||
|
config="$config NEON"
|
||||||
|
fi
|
||||||
|
if [[ $system_info == *"BLAS = 1"* ]]; then
|
||||||
|
config="$config BLAS"
|
||||||
|
fi
|
||||||
|
if [[ $system_info == *"COREML = 1"* ]]; then
|
||||||
|
config="$config COREML"
|
||||||
|
fi
|
||||||
|
if [[ $system_info == *"CUDA = 1"* ]]; then
|
||||||
|
config="$config CUDA"
|
||||||
|
fi
|
||||||
|
if [[ $system_info == *"METAL = 1"* ]]; then
|
||||||
|
config="$config METAL"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# get commit hash
|
||||||
|
commit=$(git rev-parse --short HEAD)
|
||||||
|
|
||||||
|
# add row to benchmark table
|
||||||
|
printf "| %16s | %13s | %3s | %3s | %7s | %7s | %7s | %7s | %7s |\n" \
|
||||||
|
"$config" "$model" "$actual_threads" "$BENCH_FLASH_ATTN" "$encode_time" "$decode_time" "$batchd_time" "$prompt_time" "$commit" \
|
||||||
|
| tee -a $OUT/${ci}-models-table.log
|
||||||
|
else
|
||||||
|
echo "Benchmark failed for model: $model" | tee -a $OUT/${ci}-bench-errors.log
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
function gg_sum_bench {
|
||||||
|
gg_printf '### %s\n\n' "${ci}"
|
||||||
|
|
||||||
|
gg_printf 'Whisper Benchmark Results\n'
|
||||||
|
gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)"
|
||||||
|
|
||||||
|
# show memcpy and ggml_mul_mat benchmark results if available
|
||||||
|
if [ "$BENCH_ENCODER_ONLY" -eq 0 ]; then
|
||||||
|
if [ -f "$OUT/${ci}-memcpy.log" ]; then
|
||||||
|
gg_printf '#### memcpy Benchmark\n\n'
|
||||||
|
gg_printf '```\n%s\n```\n\n' "$(cat $OUT/${ci}-memcpy.log)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -f "$OUT/${ci}-mul_mat.log" ]; then
|
||||||
|
gg_printf '#### ggml_mul_mat Benchmark\n\n'
|
||||||
|
gg_printf '```\n%s\n```\n\n' "$(cat $OUT/${ci}-mul_mat.log)"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# show model benchmark results
|
||||||
|
gg_printf '#### Model Benchmarks\n\n'
|
||||||
|
if [ -f "$OUT/${ci}-models-table.log" ]; then
|
||||||
|
gg_printf '%s\n\n' "$(cat $OUT/${ci}-models-table.log)"
|
||||||
|
else
|
||||||
|
gg_printf 'No model benchmark results available.\n\n'
|
||||||
|
fi
|
||||||
|
|
||||||
|
# show any errors that occurred
|
||||||
|
if [ -f "$OUT/${ci}-bench-errors.log" ]; then
|
||||||
|
gg_printf '#### Benchmark Errors\n\n'
|
||||||
|
gg_printf '```\n%s\n```\n\n' "$(cat $OUT/${ci}-bench-errors.log)"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
ret=0
|
||||||
|
|
||||||
|
for model in "${MODELS[@]}"; do
|
||||||
|
test $ret -eq 0 && gg_download_model ${model}
|
||||||
|
done
|
||||||
|
|
||||||
|
test $ret -eq 0 && gg_run ctest debug
|
||||||
|
test $ret -eq 0 && gg_run ctest release
|
||||||
|
|
||||||
|
test $ret -eq 0 && gg_run bench
|
||||||
|
|
||||||
|
exit $ret
|
28
close-issue.yml
Normal file
28
close-issue.yml
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
name: Close inactive issues
|
||||||
|
on:
|
||||||
|
schedule:
|
||||||
|
- cron: "42 0 * * *"
|
||||||
|
|
||||||
|
# Fine-grant permission
|
||||||
|
# https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
|
||||||
|
permissions:
|
||||||
|
issues: write
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
close-issues:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
issues: write
|
||||||
|
pull-requests: write
|
||||||
|
steps:
|
||||||
|
- uses: actions/stale@v5
|
||||||
|
with:
|
||||||
|
exempt-issue-labels: "refactor,help wanted,good first issue,research,bug,roadmap"
|
||||||
|
days-before-issue-stale: 30
|
||||||
|
days-before-issue-close: 14
|
||||||
|
stale-issue-label: "stale"
|
||||||
|
close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
|
||||||
|
days-before-pr-stale: -1
|
||||||
|
days-before-pr-close: -1
|
||||||
|
operations-per-run: 10000
|
||||||
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
@ -13,5 +13,4 @@ set_target_properties(${TARGET}
|
|||||||
PROPERTIES
|
PROPERTIES
|
||||||
EXPORT_COMPILE_COMMANDS ON
|
EXPORT_COMPILE_COMMANDS ON
|
||||||
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin"
|
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin"
|
||||||
INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib"
|
|
||||||
)
|
)
|
||||||
|
@ -42,6 +42,8 @@ endif()
|
|||||||
if(MSVC)
|
if(MSVC)
|
||||||
set(BUILD_COMPILER "${CMAKE_C_COMPILER_ID} ${CMAKE_C_COMPILER_VERSION}")
|
set(BUILD_COMPILER "${CMAKE_C_COMPILER_ID} ${CMAKE_C_COMPILER_VERSION}")
|
||||||
set(BUILD_TARGET ${CMAKE_VS_PLATFORM_NAME})
|
set(BUILD_TARGET ${CMAKE_VS_PLATFORM_NAME})
|
||||||
|
add_compile_options("$<$<COMPILE_LANGUAGE:C>:/utf-8>")
|
||||||
|
add_compile_options("$<$<COMPILE_LANGUAGE:CXX>:/utf-8>")
|
||||||
else()
|
else()
|
||||||
execute_process(
|
execute_process(
|
||||||
COMMAND sh -c "$@ --version | head -1" _ ${CMAKE_C_COMPILER}
|
COMMAND sh -c "$@ --version | head -1" _ ${CMAKE_C_COMPILER}
|
||||||
|
@ -14,10 +14,6 @@ if (WHISPER_SDL2)
|
|||||||
message(STATUS "SDL2_LIBRARIES = ${SDL2_LIBRARIES}")
|
message(STATUS "SDL2_LIBRARIES = ${SDL2_LIBRARIES}")
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if (WHISPER_CLBLAST)
|
|
||||||
find_package(CLBlast REQUIRED)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
# common
|
# common
|
||||||
|
|
||||||
set(TARGET common)
|
set(TARGET common)
|
||||||
@ -56,6 +52,8 @@ add_library(${TARGET} STATIC
|
|||||||
common.cpp
|
common.cpp
|
||||||
common-ggml.h
|
common-ggml.h
|
||||||
common-ggml.cpp
|
common-ggml.cpp
|
||||||
|
common-whisper.h
|
||||||
|
common-whisper.cpp
|
||||||
grammar-parser.h
|
grammar-parser.h
|
||||||
grammar-parser.cpp
|
grammar-parser.cpp
|
||||||
${COMMON_SOURCES_FFMPEG}
|
${COMMON_SOURCES_FFMPEG}
|
||||||
@ -63,7 +61,7 @@ add_library(${TARGET} STATIC
|
|||||||
|
|
||||||
include(DefaultTargetOptions)
|
include(DefaultTargetOptions)
|
||||||
|
|
||||||
target_link_libraries(${TARGET} PRIVATE whisper ${COMMON_EXTRA_LIBS})
|
target_link_libraries(${TARGET} PRIVATE whisper ${COMMON_EXTRA_LIBS} ${CMAKE_DL_LIBS})
|
||||||
|
|
||||||
set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
|
set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
|
||||||
set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
|
set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
|
||||||
@ -97,52 +95,29 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR})
|
|||||||
|
|
||||||
if (EMSCRIPTEN)
|
if (EMSCRIPTEN)
|
||||||
add_subdirectory(whisper.wasm)
|
add_subdirectory(whisper.wasm)
|
||||||
set_target_properties(libmain PROPERTIES FOLDER "libs")
|
|
||||||
add_subdirectory(stream.wasm)
|
add_subdirectory(stream.wasm)
|
||||||
set_target_properties(libstream PROPERTIES FOLDER "libs")
|
|
||||||
add_subdirectory(command.wasm)
|
add_subdirectory(command.wasm)
|
||||||
set_target_properties(libcommand PROPERTIES FOLDER "libs")
|
|
||||||
#add_subdirectory(talk.wasm)
|
|
||||||
#set_target_properties(libtalk PROPERTIES FOLDER "libs")
|
|
||||||
add_subdirectory(bench.wasm)
|
add_subdirectory(bench.wasm)
|
||||||
set_target_properties(libbench PROPERTIES FOLDER "libs")
|
|
||||||
elseif(CMAKE_JS_VERSION)
|
elseif(CMAKE_JS_VERSION)
|
||||||
add_subdirectory(addon.node)
|
add_subdirectory(addon.node)
|
||||||
set_target_properties(addon.node PROPERTIES FOLDER "examples")
|
|
||||||
else()
|
else()
|
||||||
add_subdirectory(main)
|
add_subdirectory(cli)
|
||||||
set_target_properties(main PROPERTIES FOLDER "examples")
|
|
||||||
if (WHISPER_SDL2)
|
|
||||||
add_subdirectory(stream)
|
|
||||||
set_target_properties(stream PROPERTIES FOLDER "examples")
|
|
||||||
endif (WHISPER_SDL2)
|
|
||||||
add_subdirectory(server)
|
|
||||||
set_target_properties(server PROPERTIES FOLDER "examples")
|
|
||||||
if (WHISPER_SDL2)
|
|
||||||
add_subdirectory(command)
|
|
||||||
set_target_properties(command PROPERTIES FOLDER "examples")
|
|
||||||
endif (WHISPER_SDL2)
|
|
||||||
add_subdirectory(bench)
|
add_subdirectory(bench)
|
||||||
set_target_properties(bench PROPERTIES FOLDER "examples")
|
add_subdirectory(server)
|
||||||
add_subdirectory(quantize)
|
add_subdirectory(quantize)
|
||||||
set_target_properties(quantize PROPERTIES FOLDER "examples")
|
if (WHISPER_SDL2)
|
||||||
if (WHISPER_SDL2)
|
add_subdirectory(stream)
|
||||||
# TODO: disabled until update
|
add_subdirectory(command)
|
||||||
# https://github.com/ggerganov/whisper.cpp/issues/1818
|
add_subdirectory(talk-llama)
|
||||||
#add_subdirectory(talk)
|
add_subdirectory(lsp)
|
||||||
#set_target_properties(talk PROPERTIES FOLDER "examples")
|
if (GGML_SYCL)
|
||||||
add_subdirectory(talk-llama)
|
add_subdirectory(sycl)
|
||||||
set_target_properties(talk-llama PROPERTIES FOLDER "examples")
|
endif()
|
||||||
add_subdirectory(lsp)
|
endif (WHISPER_SDL2)
|
||||||
set_target_properties(lsp PROPERTIES FOLDER "examples")
|
|
||||||
if (GGML_SYCL)
|
add_subdirectory(deprecation-warning)
|
||||||
add_subdirectory(sycl)
|
|
||||||
set_target_properties(ls-sycl-device PROPERTIES FOLDER "examples")
|
|
||||||
endif()
|
|
||||||
endif (WHISPER_SDL2)
|
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if (WHISPER_SDL2)
|
if (WHISPER_SDL2)
|
||||||
add_subdirectory(wchess)
|
add_subdirectory(wchess)
|
||||||
set_target_properties(wchess PROPERTIES FOLDER "examples")
|
|
||||||
endif (WHISPER_SDL2)
|
endif (WHISPER_SDL2)
|
||||||
|
@ -1,5 +1,6 @@
|
|||||||
#include "napi.h"
|
#include "napi.h"
|
||||||
#include "common.h"
|
#include "common.h"
|
||||||
|
#include "common-whisper.h"
|
||||||
|
|
||||||
#include "whisper.h"
|
#include "whisper.h"
|
||||||
|
|
||||||
@ -171,8 +172,8 @@ int run(whisper_params ¶ms, std::vector<std::vector<std::string>> &result) {
|
|||||||
|
|
||||||
// read the input audio file if params.pcmf32 is not provided
|
// read the input audio file if params.pcmf32 is not provided
|
||||||
if (params.pcmf32.empty()) {
|
if (params.pcmf32.empty()) {
|
||||||
if (!::read_wav(fname_inp, pcmf32, pcmf32s, params.diarize)) {
|
if (!::read_audio_data(fname_inp, pcmf32, pcmf32s, params.diarize)) {
|
||||||
fprintf(stderr, "error: failed to read WAV file '%s'\n", fname_inp.c_str());
|
fprintf(stderr, "error: failed to read audio file '%s'\n", fname_inp.c_str());
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@ -330,6 +331,7 @@ Napi::Value whisper(const Napi::CallbackInfo& info) {
|
|||||||
bool no_timestamps = whisper_params.Get("no_timestamps").As<Napi::Boolean>();
|
bool no_timestamps = whisper_params.Get("no_timestamps").As<Napi::Boolean>();
|
||||||
int32_t audio_ctx = whisper_params.Get("audio_ctx").As<Napi::Number>();
|
int32_t audio_ctx = whisper_params.Get("audio_ctx").As<Napi::Number>();
|
||||||
bool comma_in_time = whisper_params.Get("comma_in_time").As<Napi::Boolean>();
|
bool comma_in_time = whisper_params.Get("comma_in_time").As<Napi::Boolean>();
|
||||||
|
int32_t max_len = whisper_params.Get("max_len").As<Napi::Number>();
|
||||||
|
|
||||||
Napi::Value pcmf32Value = whisper_params.Get("pcmf32");
|
Napi::Value pcmf32Value = whisper_params.Get("pcmf32");
|
||||||
std::vector<float> pcmf32_vec;
|
std::vector<float> pcmf32_vec;
|
||||||
@ -352,6 +354,7 @@ Napi::Value whisper(const Napi::CallbackInfo& info) {
|
|||||||
params.audio_ctx = audio_ctx;
|
params.audio_ctx = audio_ctx;
|
||||||
params.pcmf32 = pcmf32_vec;
|
params.pcmf32 = pcmf32_vec;
|
||||||
params.comma_in_time = comma_in_time;
|
params.comma_in_time = comma_in_time;
|
||||||
|
params.max_len = max_len;
|
||||||
|
|
||||||
Napi::Function callback = info[1].As<Napi::Function>();
|
Napi::Function callback = info[1].As<Napi::Function>();
|
||||||
Worker* worker = new Worker(callback, params);
|
Worker* worker = new Worker(callback, params);
|
||||||
|
@ -18,6 +18,7 @@ const whisperParams = {
|
|||||||
translate: true,
|
translate: true,
|
||||||
no_timestamps: false,
|
no_timestamps: false,
|
||||||
audio_ctx: 0,
|
audio_ctx: 0,
|
||||||
|
max_len: 0,
|
||||||
};
|
};
|
||||||
|
|
||||||
const arguments = process.argv.slice(2);
|
const arguments = process.argv.slice(2);
|
||||||
|
@ -1,6 +1,8 @@
|
|||||||
set(TARGET bench)
|
set(TARGET whisper-bench)
|
||||||
add_executable(${TARGET} bench.cpp)
|
add_executable(${TARGET} bench.cpp)
|
||||||
|
|
||||||
include(DefaultTargetOptions)
|
include(DefaultTargetOptions)
|
||||||
|
|
||||||
target_link_libraries(${TARGET} PRIVATE whisper ${CMAKE_THREAD_LIBS_INIT})
|
target_link_libraries(${TARGET} PRIVATE whisper ${CMAKE_THREAD_LIBS_INIT})
|
||||||
|
|
||||||
|
install(TARGETS ${TARGET} RUNTIME)
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# bench
|
# whisper.cpp/examples/bench
|
||||||
|
|
||||||
A very basic tool for benchmarking the inference performance on your device. The tool simply runs the Encoder part of
|
A very basic tool for benchmarking the inference performance on your device. The tool simply runs the Encoder part of
|
||||||
the transformer on some random audio data and records the execution time. This way we can have an objective comparison
|
the transformer on some random audio data and records the execution time. This way we can have an objective comparison
|
||||||
@ -7,11 +7,8 @@ of the performance of the model for various setups.
|
|||||||
Benchmark results are tracked in the following Github issue: https://github.com/ggerganov/whisper.cpp/issues/89
|
Benchmark results are tracked in the following Github issue: https://github.com/ggerganov/whisper.cpp/issues/89
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# build the bench tool
|
# run the bench too on the small.en model using 4 threads
|
||||||
$ make bench
|
$ ./build/bin/whisper-bench -m ./models/ggml-small.en.bin -t 4
|
||||||
|
|
||||||
# run it on the small.en model using 4 threads
|
|
||||||
$ ./bench -m ./models/ggml-small.en.bin -t 4
|
|
||||||
|
|
||||||
whisper_model_load: loading model from './models/ggml-small.en.bin'
|
whisper_model_load: loading model from './models/ggml-small.en.bin'
|
||||||
whisper_model_load: n_vocab = 51864
|
whisper_model_load: n_vocab = 51864
|
||||||
|
@ -50,11 +50,11 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
|
|||||||
fprintf(stderr, " -t N, --threads N [%-7d] number of threads to use during computation\n", params.n_threads);
|
fprintf(stderr, " -t N, --threads N [%-7d] number of threads to use during computation\n", params.n_threads);
|
||||||
fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
|
fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
|
||||||
fprintf(stderr, " -w N, --what N [%-7d] what to benchmark:\n", params.what);
|
fprintf(stderr, " -w N, --what N [%-7d] what to benchmark:\n", params.what);
|
||||||
fprintf(stderr, " -ng, --no-gpu [%-7s] disable GPU\n", params.use_gpu ? "false" : "true");
|
|
||||||
fprintf(stderr, " -fa, --flash-attn [%-7s] enable flash attention\n", params.flash_attn ? "true" : "false");
|
|
||||||
fprintf(stderr, " %-7s 0 - whisper\n", "");
|
fprintf(stderr, " %-7s 0 - whisper\n", "");
|
||||||
fprintf(stderr, " %-7s 1 - memcpy\n", "");
|
fprintf(stderr, " %-7s 1 - memcpy\n", "");
|
||||||
fprintf(stderr, " %-7s 2 - ggml_mul_mat\n", "");
|
fprintf(stderr, " %-7s 2 - ggml_mul_mat\n", "");
|
||||||
|
fprintf(stderr, " -ng, --no-gpu [%-7s] disable GPU\n", params.use_gpu ? "false" : "true");
|
||||||
|
fprintf(stderr, " -fa, --flash-attn [%-7s] enable flash attention\n", params.flash_attn ? "true" : "false");
|
||||||
fprintf(stderr, "\n");
|
fprintf(stderr, "\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,6 +1,8 @@
|
|||||||
set(TARGET main)
|
set(TARGET whisper-cli)
|
||||||
add_executable(${TARGET} main.cpp)
|
add_executable(${TARGET} cli.cpp)
|
||||||
|
|
||||||
include(DefaultTargetOptions)
|
include(DefaultTargetOptions)
|
||||||
|
|
||||||
target_link_libraries(${TARGET} PRIVATE common whisper ${FFMPEG_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
|
target_link_libraries(${TARGET} PRIVATE common whisper ${FFMPEG_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
|
||||||
|
|
||||||
|
install(TARGETS ${TARGET} RUNTIME)
|
@ -1,12 +1,12 @@
|
|||||||
# main
|
# whisper.cpp/examples/cli
|
||||||
|
|
||||||
This is the main example demonstrating most of the functionality of the Whisper model.
|
This is the main example demonstrating most of the functionality of the Whisper model.
|
||||||
It can be used as a reference for using the `whisper.cpp` library in other projects.
|
It can be used as a reference for using the `whisper.cpp` library in other projects.
|
||||||
|
|
||||||
```
|
```
|
||||||
./main -h
|
./build/bin/whisper-cli -h
|
||||||
|
|
||||||
usage: ./main [options] file0.wav file1.wav ...
|
usage: ./build-pkg/bin/whisper-cli [options] file0.wav file1.wav ...
|
||||||
|
|
||||||
options:
|
options:
|
||||||
-h, --help [default] show this help message and exit
|
-h, --help [default] show this help message and exit
|
||||||
@ -20,9 +20,12 @@ options:
|
|||||||
-sow, --split-on-word [false ] split on word rather than on token
|
-sow, --split-on-word [false ] split on word rather than on token
|
||||||
-bo N, --best-of N [5 ] number of best candidates to keep
|
-bo N, --best-of N [5 ] number of best candidates to keep
|
||||||
-bs N, --beam-size N [5 ] beam size for beam search
|
-bs N, --beam-size N [5 ] beam size for beam search
|
||||||
|
-ac N, --audio-ctx N [0 ] audio context size (0 - all)
|
||||||
-wt N, --word-thold N [0.01 ] word timestamp probability threshold
|
-wt N, --word-thold N [0.01 ] word timestamp probability threshold
|
||||||
-et N, --entropy-thold N [2.40 ] entropy threshold for decoder fail
|
-et N, --entropy-thold N [2.40 ] entropy threshold for decoder fail
|
||||||
-lpt N, --logprob-thold N [-1.00 ] log probability threshold for decoder fail
|
-lpt N, --logprob-thold N [-1.00 ] log probability threshold for decoder fail
|
||||||
|
-tp, --temperature N [0.00 ] The sampling temperature, between 0 and 1
|
||||||
|
-tpi, --temperature-inc N [0.20 ] The increment of temperature, between 0 and 1
|
||||||
-debug, --debug-mode [false ] enable debug mode (eg. dump log_mel)
|
-debug, --debug-mode [false ] enable debug mode (eg. dump log_mel)
|
||||||
-tr, --translate [false ] translate from source language to english
|
-tr, --translate [false ] translate from source language to english
|
||||||
-di, --diarize [false ] stereo audio diarization
|
-di, --diarize [false ] stereo audio diarization
|
||||||
@ -38,16 +41,23 @@ options:
|
|||||||
-oj, --output-json [false ] output result in a JSON file
|
-oj, --output-json [false ] output result in a JSON file
|
||||||
-ojf, --output-json-full [false ] include more information in the JSON file
|
-ojf, --output-json-full [false ] include more information in the JSON file
|
||||||
-of FNAME, --output-file FNAME [ ] output file path (without file extension)
|
-of FNAME, --output-file FNAME [ ] output file path (without file extension)
|
||||||
|
-np, --no-prints [false ] do not print anything other than the results
|
||||||
-ps, --print-special [false ] print special tokens
|
-ps, --print-special [false ] print special tokens
|
||||||
-pc, --print-colors [false ] print colors
|
-pc, --print-colors [false ] print colors
|
||||||
-pp, --print-progress [false ] print progress
|
-pp, --print-progress [false ] print progress
|
||||||
-nt, --no-timestamps [false ] do not print timestamps
|
-nt, --no-timestamps [false ] do not print timestamps
|
||||||
-l LANG, --language LANG [en ] spoken language ('auto' for auto-detect)
|
-l LANG, --language LANG [en ] spoken language ('auto' for auto-detect)
|
||||||
-dl, --detect-language [false ] exit after automatically detecting language
|
-dl, --detect-language [false ] exit after automatically detecting language
|
||||||
--prompt PROMPT [ ] initial prompt
|
--prompt PROMPT [ ] initial prompt (max n_text_ctx/2 tokens)
|
||||||
-m FNAME, --model FNAME [models/ggml-base.en.bin] model path
|
-m FNAME, --model FNAME [models/ggml-base.en.bin] model path
|
||||||
-f FNAME, --file FNAME [ ] input WAV file path
|
-f FNAME, --file FNAME [ ] input WAV file path
|
||||||
-oved D, --ov-e-device DNAME [CPU ] the OpenVINO device used for encode inference
|
-oved D, --ov-e-device DNAME [CPU ] the OpenVINO device used for encode inference
|
||||||
|
-dtw MODEL --dtw MODEL [ ] compute token-level timestamps
|
||||||
-ls, --log-score [false ] log best decoder scores of tokens
|
-ls, --log-score [false ] log best decoder scores of tokens
|
||||||
-ng, --no-gpu [false ] disable GPU
|
-ng, --no-gpu [false ] disable GPU
|
||||||
|
-fa, --flash-attn [false ] flash attention
|
||||||
|
--suppress-regex REGEX [ ] regular expression matching tokens to suppress
|
||||||
|
--grammar GRAMMAR [ ] GBNF grammar to guide decoding
|
||||||
|
--grammar-rule RULE [ ] top-level GBNF grammar rule name
|
||||||
|
--grammar-penalty N [100.0 ] scales down logits of nongrammar tokens
|
||||||
```
|
```
|
@ -1,4 +1,5 @@
|
|||||||
#include "common.h"
|
#include "common.h"
|
||||||
|
#include "common-whisper.h"
|
||||||
|
|
||||||
#include "whisper.h"
|
#include "whisper.h"
|
||||||
#include "grammar-parser.h"
|
#include "grammar-parser.h"
|
||||||
@ -6,12 +7,16 @@
|
|||||||
#include <cmath>
|
#include <cmath>
|
||||||
#include <fstream>
|
#include <fstream>
|
||||||
#include <cstdio>
|
#include <cstdio>
|
||||||
#include <regex>
|
|
||||||
#include <string>
|
#include <string>
|
||||||
#include <thread>
|
#include <thread>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
#include <cstring>
|
#include <cstring>
|
||||||
|
|
||||||
|
#if defined(_WIN32)
|
||||||
|
#define NOMINMAX
|
||||||
|
#include <windows.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
#if defined(_MSC_VER)
|
#if defined(_MSC_VER)
|
||||||
#pragma warning(disable: 4244 4267) // possible loss of data
|
#pragma warning(disable: 4244 4267) // possible loss of data
|
||||||
#endif
|
#endif
|
||||||
@ -43,6 +48,7 @@ struct whisper_params {
|
|||||||
float word_thold = 0.01f;
|
float word_thold = 0.01f;
|
||||||
float entropy_thold = 2.40f;
|
float entropy_thold = 2.40f;
|
||||||
float logprob_thold = -1.00f;
|
float logprob_thold = -1.00f;
|
||||||
|
float no_speech_thold = 0.6f;
|
||||||
float grammar_penalty = 100.0f;
|
float grammar_penalty = 100.0f;
|
||||||
float temperature = 0.0f;
|
float temperature = 0.0f;
|
||||||
float temperature_inc = 0.2f;
|
float temperature_inc = 0.2f;
|
||||||
@ -70,6 +76,7 @@ struct whisper_params {
|
|||||||
bool log_score = false;
|
bool log_score = false;
|
||||||
bool use_gpu = true;
|
bool use_gpu = true;
|
||||||
bool flash_attn = false;
|
bool flash_attn = false;
|
||||||
|
bool suppress_nst = false;
|
||||||
|
|
||||||
std::string language = "en";
|
std::string language = "en";
|
||||||
std::string prompt;
|
std::string prompt;
|
||||||
@ -104,6 +111,11 @@ static char * whisper_param_turn_lowercase(char * in){
|
|||||||
return in;
|
return in;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static char * requires_value_error(const std::string & arg) {
|
||||||
|
fprintf(stderr, "error: argument %s requires value\n", arg.c_str());
|
||||||
|
exit(0);
|
||||||
|
}
|
||||||
|
|
||||||
static bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
|
static bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
|
||||||
for (int i = 1; i < argc; i++) {
|
for (int i = 1; i < argc; i++) {
|
||||||
std::string arg = argv[i];
|
std::string arg = argv[i];
|
||||||
@ -122,21 +134,23 @@ static bool whisper_params_parse(int argc, char ** argv, whisper_params & params
|
|||||||
whisper_print_usage(argc, argv, params);
|
whisper_print_usage(argc, argv, params);
|
||||||
exit(0);
|
exit(0);
|
||||||
}
|
}
|
||||||
else if (arg == "-t" || arg == "--threads") { params.n_threads = std::stoi(argv[++i]); }
|
#define ARGV_NEXT (((i + 1) < argc) ? argv[++i] : requires_value_error(arg))
|
||||||
else if (arg == "-p" || arg == "--processors") { params.n_processors = std::stoi(argv[++i]); }
|
else if (arg == "-t" || arg == "--threads") { params.n_threads = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-ot" || arg == "--offset-t") { params.offset_t_ms = std::stoi(argv[++i]); }
|
else if (arg == "-p" || arg == "--processors") { params.n_processors = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-on" || arg == "--offset-n") { params.offset_n = std::stoi(argv[++i]); }
|
else if (arg == "-ot" || arg == "--offset-t") { params.offset_t_ms = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-d" || arg == "--duration") { params.duration_ms = std::stoi(argv[++i]); }
|
else if (arg == "-on" || arg == "--offset-n") { params.offset_n = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-mc" || arg == "--max-context") { params.max_context = std::stoi(argv[++i]); }
|
else if (arg == "-d" || arg == "--duration") { params.duration_ms = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-ml" || arg == "--max-len") { params.max_len = std::stoi(argv[++i]); }
|
else if (arg == "-mc" || arg == "--max-context") { params.max_context = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-bo" || arg == "--best-of") { params.best_of = std::stoi(argv[++i]); }
|
else if (arg == "-ml" || arg == "--max-len") { params.max_len = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-bs" || arg == "--beam-size") { params.beam_size = std::stoi(argv[++i]); }
|
else if (arg == "-bo" || arg == "--best-of") { params.best_of = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-ac" || arg == "--audio-ctx") { params.audio_ctx = std::stoi(argv[++i]); }
|
else if (arg == "-bs" || arg == "--beam-size") { params.beam_size = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-wt" || arg == "--word-thold") { params.word_thold = std::stof(argv[++i]); }
|
else if (arg == "-ac" || arg == "--audio-ctx") { params.audio_ctx = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-et" || arg == "--entropy-thold") { params.entropy_thold = std::stof(argv[++i]); }
|
else if (arg == "-wt" || arg == "--word-thold") { params.word_thold = std::stof(ARGV_NEXT); }
|
||||||
else if (arg == "-lpt" || arg == "--logprob-thold") { params.logprob_thold = std::stof(argv[++i]); }
|
else if (arg == "-et" || arg == "--entropy-thold") { params.entropy_thold = std::stof(ARGV_NEXT); }
|
||||||
else if (arg == "-tp" || arg == "--temperature") { params.temperature = std::stof(argv[++i]); }
|
else if (arg == "-lpt" || arg == "--logprob-thold") { params.logprob_thold = std::stof(ARGV_NEXT); }
|
||||||
else if (arg == "-tpi" || arg == "--temperature-inc") { params.temperature_inc = std::stof(argv[++i]); }
|
else if (arg == "-nth" || arg == "--no-speech-thold") { params.no_speech_thold = std::stof(ARGV_NEXT); }
|
||||||
|
else if (arg == "-tp" || arg == "--temperature") { params.temperature = std::stof(ARGV_NEXT); }
|
||||||
|
else if (arg == "-tpi" || arg == "--temperature-inc") { params.temperature_inc = std::stof(ARGV_NEXT); }
|
||||||
else if (arg == "-debug"|| arg == "--debug-mode") { params.debug_mode = true; }
|
else if (arg == "-debug"|| arg == "--debug-mode") { params.debug_mode = true; }
|
||||||
else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
|
else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
|
||||||
else if (arg == "-di" || arg == "--diarize") { params.diarize = true; }
|
else if (arg == "-di" || arg == "--diarize") { params.diarize = true; }
|
||||||
@ -148,30 +162,31 @@ static bool whisper_params_parse(int argc, char ** argv, whisper_params & params
|
|||||||
else if (arg == "-osrt" || arg == "--output-srt") { params.output_srt = true; }
|
else if (arg == "-osrt" || arg == "--output-srt") { params.output_srt = true; }
|
||||||
else if (arg == "-owts" || arg == "--output-words") { params.output_wts = true; }
|
else if (arg == "-owts" || arg == "--output-words") { params.output_wts = true; }
|
||||||
else if (arg == "-olrc" || arg == "--output-lrc") { params.output_lrc = true; }
|
else if (arg == "-olrc" || arg == "--output-lrc") { params.output_lrc = true; }
|
||||||
else if (arg == "-fp" || arg == "--font-path") { params.font_path = argv[++i]; }
|
else if (arg == "-fp" || arg == "--font-path") { params.font_path = ARGV_NEXT; }
|
||||||
else if (arg == "-ocsv" || arg == "--output-csv") { params.output_csv = true; }
|
else if (arg == "-ocsv" || arg == "--output-csv") { params.output_csv = true; }
|
||||||
else if (arg == "-oj" || arg == "--output-json") { params.output_jsn = true; }
|
else if (arg == "-oj" || arg == "--output-json") { params.output_jsn = true; }
|
||||||
else if (arg == "-ojf" || arg == "--output-json-full"){ params.output_jsn_full = params.output_jsn = true; }
|
else if (arg == "-ojf" || arg == "--output-json-full"){ params.output_jsn_full = params.output_jsn = true; }
|
||||||
else if (arg == "-of" || arg == "--output-file") { params.fname_out.emplace_back(argv[++i]); }
|
else if (arg == "-of" || arg == "--output-file") { params.fname_out.emplace_back(ARGV_NEXT); }
|
||||||
else if (arg == "-np" || arg == "--no-prints") { params.no_prints = true; }
|
else if (arg == "-np" || arg == "--no-prints") { params.no_prints = true; }
|
||||||
else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; }
|
else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; }
|
||||||
else if (arg == "-pc" || arg == "--print-colors") { params.print_colors = true; }
|
else if (arg == "-pc" || arg == "--print-colors") { params.print_colors = true; }
|
||||||
else if (arg == "-pp" || arg == "--print-progress") { params.print_progress = true; }
|
else if (arg == "-pp" || arg == "--print-progress") { params.print_progress = true; }
|
||||||
else if (arg == "-nt" || arg == "--no-timestamps") { params.no_timestamps = true; }
|
else if (arg == "-nt" || arg == "--no-timestamps") { params.no_timestamps = true; }
|
||||||
else if (arg == "-l" || arg == "--language") { params.language = whisper_param_turn_lowercase(argv[++i]); }
|
else if (arg == "-l" || arg == "--language") { params.language = whisper_param_turn_lowercase(ARGV_NEXT); }
|
||||||
else if (arg == "-dl" || arg == "--detect-language") { params.detect_language = true; }
|
else if (arg == "-dl" || arg == "--detect-language") { params.detect_language = true; }
|
||||||
else if ( arg == "--prompt") { params.prompt = argv[++i]; }
|
else if ( arg == "--prompt") { params.prompt = ARGV_NEXT; }
|
||||||
else if (arg == "-m" || arg == "--model") { params.model = argv[++i]; }
|
else if (arg == "-m" || arg == "--model") { params.model = ARGV_NEXT; }
|
||||||
else if (arg == "-f" || arg == "--file") { params.fname_inp.emplace_back(argv[++i]); }
|
else if (arg == "-f" || arg == "--file") { params.fname_inp.emplace_back(ARGV_NEXT); }
|
||||||
else if (arg == "-oved" || arg == "--ov-e-device") { params.openvino_encode_device = argv[++i]; }
|
else if (arg == "-oved" || arg == "--ov-e-device") { params.openvino_encode_device = ARGV_NEXT; }
|
||||||
else if (arg == "-dtw" || arg == "--dtw") { params.dtw = argv[++i]; }
|
else if (arg == "-dtw" || arg == "--dtw") { params.dtw = ARGV_NEXT; }
|
||||||
else if (arg == "-ls" || arg == "--log-score") { params.log_score = true; }
|
else if (arg == "-ls" || arg == "--log-score") { params.log_score = true; }
|
||||||
else if (arg == "-ng" || arg == "--no-gpu") { params.use_gpu = false; }
|
else if (arg == "-ng" || arg == "--no-gpu") { params.use_gpu = false; }
|
||||||
else if (arg == "-fa" || arg == "--flash-attn") { params.flash_attn = true; }
|
else if (arg == "-fa" || arg == "--flash-attn") { params.flash_attn = true; }
|
||||||
else if ( arg == "--suppress-regex") { params.suppress_regex = argv[++i]; }
|
else if (arg == "-sns" || arg == "--suppress-nst") { params.suppress_nst = true; }
|
||||||
else if ( arg == "--grammar") { params.grammar = argv[++i]; }
|
else if ( arg == "--suppress-regex") { params.suppress_regex = ARGV_NEXT; }
|
||||||
else if ( arg == "--grammar-rule") { params.grammar_rule = argv[++i]; }
|
else if ( arg == "--grammar") { params.grammar = ARGV_NEXT; }
|
||||||
else if ( arg == "--grammar-penalty") { params.grammar_penalty = std::stof(argv[++i]); }
|
else if ( arg == "--grammar-rule") { params.grammar_rule = ARGV_NEXT; }
|
||||||
|
else if ( arg == "--grammar-penalty") { params.grammar_penalty = std::stof(ARGV_NEXT); }
|
||||||
else {
|
else {
|
||||||
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
|
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
|
||||||
whisper_print_usage(argc, argv, params);
|
whisper_print_usage(argc, argv, params);
|
||||||
@ -184,7 +199,8 @@ static bool whisper_params_parse(int argc, char ** argv, whisper_params & params
|
|||||||
|
|
||||||
static void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & params) {
|
static void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & params) {
|
||||||
fprintf(stderr, "\n");
|
fprintf(stderr, "\n");
|
||||||
fprintf(stderr, "usage: %s [options] file0.wav file1.wav ...\n", argv[0]);
|
fprintf(stderr, "usage: %s [options] file0 file1 ...\n", argv[0]);
|
||||||
|
fprintf(stderr, "supported audio formats: flac, mp3, ogg, wav\n");
|
||||||
fprintf(stderr, "\n");
|
fprintf(stderr, "\n");
|
||||||
fprintf(stderr, "options:\n");
|
fprintf(stderr, "options:\n");
|
||||||
fprintf(stderr, " -h, --help [default] show this help message and exit\n");
|
fprintf(stderr, " -h, --help [default] show this help message and exit\n");
|
||||||
@ -202,6 +218,7 @@ static void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params
|
|||||||
fprintf(stderr, " -wt N, --word-thold N [%-7.2f] word timestamp probability threshold\n", params.word_thold);
|
fprintf(stderr, " -wt N, --word-thold N [%-7.2f] word timestamp probability threshold\n", params.word_thold);
|
||||||
fprintf(stderr, " -et N, --entropy-thold N [%-7.2f] entropy threshold for decoder fail\n", params.entropy_thold);
|
fprintf(stderr, " -et N, --entropy-thold N [%-7.2f] entropy threshold for decoder fail\n", params.entropy_thold);
|
||||||
fprintf(stderr, " -lpt N, --logprob-thold N [%-7.2f] log probability threshold for decoder fail\n", params.logprob_thold);
|
fprintf(stderr, " -lpt N, --logprob-thold N [%-7.2f] log probability threshold for decoder fail\n", params.logprob_thold);
|
||||||
|
fprintf(stderr, " -nth N, --no-speech-thold N [%-7.2f] no speech threshold\n", params.no_speech_thold);
|
||||||
fprintf(stderr, " -tp, --temperature N [%-7.2f] The sampling temperature, between 0 and 1\n", params.temperature);
|
fprintf(stderr, " -tp, --temperature N [%-7.2f] The sampling temperature, between 0 and 1\n", params.temperature);
|
||||||
fprintf(stderr, " -tpi, --temperature-inc N [%-7.2f] The increment of temperature, between 0 and 1\n",params.temperature_inc);
|
fprintf(stderr, " -tpi, --temperature-inc N [%-7.2f] The increment of temperature, between 0 and 1\n",params.temperature_inc);
|
||||||
fprintf(stderr, " -debug, --debug-mode [%-7s] enable debug mode (eg. dump log_mel)\n", params.debug_mode ? "true" : "false");
|
fprintf(stderr, " -debug, --debug-mode [%-7s] enable debug mode (eg. dump log_mel)\n", params.debug_mode ? "true" : "false");
|
||||||
@ -228,12 +245,13 @@ static void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params
|
|||||||
fprintf(stderr, " -dl, --detect-language [%-7s] exit after automatically detecting language\n", params.detect_language ? "true" : "false");
|
fprintf(stderr, " -dl, --detect-language [%-7s] exit after automatically detecting language\n", params.detect_language ? "true" : "false");
|
||||||
fprintf(stderr, " --prompt PROMPT [%-7s] initial prompt (max n_text_ctx/2 tokens)\n", params.prompt.c_str());
|
fprintf(stderr, " --prompt PROMPT [%-7s] initial prompt (max n_text_ctx/2 tokens)\n", params.prompt.c_str());
|
||||||
fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
|
fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
|
||||||
fprintf(stderr, " -f FNAME, --file FNAME [%-7s] input WAV file path\n", "");
|
fprintf(stderr, " -f FNAME, --file FNAME [%-7s] input audio file path\n", "");
|
||||||
fprintf(stderr, " -oved D, --ov-e-device DNAME [%-7s] the OpenVINO device used for encode inference\n", params.openvino_encode_device.c_str());
|
fprintf(stderr, " -oved D, --ov-e-device DNAME [%-7s] the OpenVINO device used for encode inference\n", params.openvino_encode_device.c_str());
|
||||||
fprintf(stderr, " -dtw MODEL --dtw MODEL [%-7s] compute token-level timestamps\n", params.dtw.c_str());
|
fprintf(stderr, " -dtw MODEL --dtw MODEL [%-7s] compute token-level timestamps\n", params.dtw.c_str());
|
||||||
fprintf(stderr, " -ls, --log-score [%-7s] log best decoder scores of tokens\n", params.log_score?"true":"false");
|
fprintf(stderr, " -ls, --log-score [%-7s] log best decoder scores of tokens\n", params.log_score?"true":"false");
|
||||||
fprintf(stderr, " -ng, --no-gpu [%-7s] disable GPU\n", params.use_gpu ? "false" : "true");
|
fprintf(stderr, " -ng, --no-gpu [%-7s] disable GPU\n", params.use_gpu ? "false" : "true");
|
||||||
fprintf(stderr, " -fa, --flash-attn [%-7s] flash attention\n", params.flash_attn ? "true" : "false");
|
fprintf(stderr, " -fa, --flash-attn [%-7s] flash attention\n", params.flash_attn ? "true" : "false");
|
||||||
|
fprintf(stderr, " -sns, --suppress-nst [%-7s] suppress non-speech tokens\n", params.suppress_nst ? "true" : "false");
|
||||||
fprintf(stderr, " --suppress-regex REGEX [%-7s] regular expression matching tokens to suppress\n", params.suppress_regex.c_str());
|
fprintf(stderr, " --suppress-regex REGEX [%-7s] regular expression matching tokens to suppress\n", params.suppress_regex.c_str());
|
||||||
fprintf(stderr, " --grammar GRAMMAR [%-7s] GBNF grammar to guide decoding\n", params.grammar.c_str());
|
fprintf(stderr, " --grammar GRAMMAR [%-7s] GBNF grammar to guide decoding\n", params.grammar.c_str());
|
||||||
fprintf(stderr, " --grammar-rule RULE [%-7s] top-level GBNF grammar rule name\n", params.grammar_rule.c_str());
|
fprintf(stderr, " --grammar-rule RULE [%-7s] top-level GBNF grammar rule name\n", params.grammar_rule.c_str());
|
||||||
@ -904,6 +922,13 @@ static bool output_lrc(struct whisper_context * ctx, const char * fname, const w
|
|||||||
static void cb_log_disable(enum ggml_log_level , const char * , void * ) { }
|
static void cb_log_disable(enum ggml_log_level , const char * , void * ) { }
|
||||||
|
|
||||||
int main(int argc, char ** argv) {
|
int main(int argc, char ** argv) {
|
||||||
|
#if defined(_WIN32)
|
||||||
|
// Set the console output code page to UTF-8, while command line arguments
|
||||||
|
// are still encoded in the system's code page. In this way, we can print
|
||||||
|
// non-ASCII characters to the console, and access files with non-ASCII paths.
|
||||||
|
SetConsoleOutputCP(CP_UTF8);
|
||||||
|
#endif
|
||||||
|
|
||||||
whisper_params params;
|
whisper_params params;
|
||||||
|
|
||||||
// If the only argument starts with "@", read arguments line-by-line
|
// If the only argument starts with "@", read arguments line-by-line
|
||||||
@ -1045,8 +1070,8 @@ int main(int argc, char ** argv) {
|
|||||||
std::vector<float> pcmf32; // mono-channel F32 PCM
|
std::vector<float> pcmf32; // mono-channel F32 PCM
|
||||||
std::vector<std::vector<float>> pcmf32s; // stereo-channel F32 PCM
|
std::vector<std::vector<float>> pcmf32s; // stereo-channel F32 PCM
|
||||||
|
|
||||||
if (!::read_wav(fname_inp, pcmf32, pcmf32s, params.diarize)) {
|
if (!::read_audio_data(fname_inp, pcmf32, pcmf32s, params.diarize)) {
|
||||||
fprintf(stderr, "error: failed to read WAV file '%s'\n", fname_inp.c_str());
|
fprintf(stderr, "error: failed to read audio file '%s'\n", fname_inp.c_str());
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1121,9 +1146,12 @@ int main(int argc, char ** argv) {
|
|||||||
|
|
||||||
wparams.entropy_thold = params.entropy_thold;
|
wparams.entropy_thold = params.entropy_thold;
|
||||||
wparams.logprob_thold = params.logprob_thold;
|
wparams.logprob_thold = params.logprob_thold;
|
||||||
|
wparams.no_speech_thold = params.no_speech_thold;
|
||||||
|
|
||||||
wparams.no_timestamps = params.no_timestamps;
|
wparams.no_timestamps = params.no_timestamps;
|
||||||
|
|
||||||
|
wparams.suppress_nst = params.suppress_nst;
|
||||||
|
|
||||||
whisper_print_user_data user_data = { ¶ms, &pcmf32s, 0 };
|
whisper_print_user_data user_data = { ¶ms, &pcmf32s, 0 };
|
||||||
|
|
||||||
const auto & grammar_parsed = params.grammar_parsed;
|
const auto & grammar_parsed = params.grammar_parsed;
|
@ -1,9 +1,10 @@
|
|||||||
if (WHISPER_SDL2)
|
if (WHISPER_SDL2)
|
||||||
# command
|
set(TARGET whisper-command)
|
||||||
set(TARGET command)
|
|
||||||
add_executable(${TARGET} command.cpp)
|
add_executable(${TARGET} command.cpp)
|
||||||
|
|
||||||
include(DefaultTargetOptions)
|
include(DefaultTargetOptions)
|
||||||
|
|
||||||
target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${CMAKE_THREAD_LIBS_INIT})
|
target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${CMAKE_THREAD_LIBS_INIT})
|
||||||
|
|
||||||
|
install(TARGETS ${TARGET} RUNTIME)
|
||||||
endif ()
|
endif ()
|
||||||
|
@ -1,14 +1,14 @@
|
|||||||
# command
|
# whisper.cpp/examples/command
|
||||||
|
|
||||||
This is a basic Voice Assistant example that accepts voice commands from the microphone.
|
This is a basic Voice Assistant example that accepts voice commands from the microphone.
|
||||||
More info is available in [issue #171](https://github.com/ggerganov/whisper.cpp/issues/171).
|
More info is available in [issue #171](https://github.com/ggerganov/whisper.cpp/issues/171).
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Run with default arguments and small model
|
# Run with default arguments and small model
|
||||||
./command -m ./models/ggml-small.en.bin -t 8
|
./whisper-command -m ./models/ggml-small.en.bin -t 8
|
||||||
|
|
||||||
# On Raspberry Pi, use tiny or base models + "-ac 768" for better performance
|
# On Raspberry Pi, use tiny or base models + "-ac 768" for better performance
|
||||||
./command -m ./models/ggml-tiny.en.bin -ac 768 -t 3 -c 0
|
./whisper-command -m ./models/ggml-tiny.en.bin -ac 768 -t 3 -c 0
|
||||||
```
|
```
|
||||||
|
|
||||||
https://user-images.githubusercontent.com/1991296/204038393-2f846eae-c255-4099-a76d-5735c25c49da.mp4
|
https://user-images.githubusercontent.com/1991296/204038393-2f846eae-c255-4099-a76d-5735c25c49da.mp4
|
||||||
@ -23,10 +23,10 @@ Initial tests show that this approach might be extremely efficient in terms of p
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Run in guided mode, the list of allowed commands is in commands.txt
|
# Run in guided mode, the list of allowed commands is in commands.txt
|
||||||
./command -m ./models/ggml-base.en.bin -cmd ./examples/command/commands.txt
|
./whisper-command -m ./models/ggml-base.en.bin -cmd ./examples/command/commands.txt
|
||||||
|
|
||||||
# On Raspberry Pi, in guided mode you can use "-ac 128" for extra performance
|
# On Raspberry Pi, in guided mode you can use "-ac 128" for extra performance
|
||||||
./command -m ./models/ggml-tiny.en.bin -cmd ./examples/command/commands.txt -ac 128 -t 3 -c 0
|
./whisper-command -m ./models/ggml-tiny.en.bin -cmd ./examples/command/commands.txt -ac 128 -t 3 -c 0
|
||||||
```
|
```
|
||||||
|
|
||||||
https://user-images.githubusercontent.com/1991296/207435352-8fc4ed3f-bde5-4555-9b8b-aeeb76bee969.mp4
|
https://user-images.githubusercontent.com/1991296/207435352-8fc4ed3f-bde5-4555-9b8b-aeeb76bee969.mp4
|
||||||
@ -34,7 +34,7 @@ https://user-images.githubusercontent.com/1991296/207435352-8fc4ed3f-bde5-4555-9
|
|||||||
|
|
||||||
## Building
|
## Building
|
||||||
|
|
||||||
The `command` tool depends on SDL2 library to capture audio from the microphone. You can build it like this:
|
The `whisper-command` tool depends on SDL2 library to capture audio from the microphone. You can build it like this:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Install SDL2
|
# Install SDL2
|
||||||
@ -47,5 +47,6 @@ sudo dnf install SDL2 SDL2-devel
|
|||||||
# Install SDL2 on Mac OS
|
# Install SDL2 on Mac OS
|
||||||
brew install sdl2
|
brew install sdl2
|
||||||
|
|
||||||
make command
|
cmake -B build -DWHISPER_SDL2=ON
|
||||||
|
cmake --build build --config Release
|
||||||
```
|
```
|
||||||
|
@ -11,16 +11,15 @@
|
|||||||
#include "whisper.h"
|
#include "whisper.h"
|
||||||
#include "grammar-parser.h"
|
#include "grammar-parser.h"
|
||||||
|
|
||||||
#include <sstream>
|
#include <algorithm>
|
||||||
#include <cassert>
|
#include <chrono>
|
||||||
#include <cstdio>
|
#include <cstdio>
|
||||||
#include <fstream>
|
#include <fstream>
|
||||||
#include <mutex>
|
#include <map>
|
||||||
#include <regex>
|
#include <sstream>
|
||||||
#include <string>
|
#include <string>
|
||||||
#include <thread>
|
#include <thread>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
#include <map>
|
|
||||||
|
|
||||||
// command-line parameters
|
// command-line parameters
|
||||||
struct whisper_params {
|
struct whisper_params {
|
||||||
|
@ -159,15 +159,11 @@ void audio_async::callback(uint8_t * stream, int len) {
|
|||||||
|
|
||||||
memcpy(&m_audio[m_audio_pos], stream, n0 * sizeof(float));
|
memcpy(&m_audio[m_audio_pos], stream, n0 * sizeof(float));
|
||||||
memcpy(&m_audio[0], stream + n0 * sizeof(float), (n_samples - n0) * sizeof(float));
|
memcpy(&m_audio[0], stream + n0 * sizeof(float), (n_samples - n0) * sizeof(float));
|
||||||
|
|
||||||
m_audio_pos = (m_audio_pos + n_samples) % m_audio.size();
|
|
||||||
m_audio_len = m_audio.size();
|
|
||||||
} else {
|
} else {
|
||||||
memcpy(&m_audio[m_audio_pos], stream, n_samples * sizeof(float));
|
memcpy(&m_audio[m_audio_pos], stream, n_samples * sizeof(float));
|
||||||
|
|
||||||
m_audio_pos = (m_audio_pos + n_samples) % m_audio.size();
|
|
||||||
m_audio_len = std::min(m_audio_len + n_samples, m_audio.size());
|
|
||||||
}
|
}
|
||||||
|
m_audio_pos = (m_audio_pos + n_samples) % m_audio.size();
|
||||||
|
m_audio_len = std::min(m_audio_len + n_samples, m_audio.size());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
172
examples/common-whisper.cpp
Normal file
172
examples/common-whisper.cpp
Normal file
@ -0,0 +1,172 @@
|
|||||||
|
#define _USE_MATH_DEFINES // for M_PI
|
||||||
|
|
||||||
|
#include "common-whisper.h"
|
||||||
|
|
||||||
|
#include "common.h"
|
||||||
|
|
||||||
|
#include "whisper.h"
|
||||||
|
|
||||||
|
// third-party utilities
|
||||||
|
// use your favorite implementations
|
||||||
|
#define STB_VORBIS_HEADER_ONLY
|
||||||
|
#include "stb_vorbis.c" /* Enables Vorbis decoding. */
|
||||||
|
|
||||||
|
#ifdef _WIN32
|
||||||
|
#ifndef NOMINMAX
|
||||||
|
#define NOMINMAX
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define MA_NO_DEVICE_IO
|
||||||
|
#define MA_NO_THREADING
|
||||||
|
#define MA_NO_ENCODING
|
||||||
|
#define MA_NO_GENERATION
|
||||||
|
#define MA_NO_RESOURCE_MANAGER
|
||||||
|
#define MA_NO_NODE_GRAPH
|
||||||
|
#define MINIAUDIO_IMPLEMENTATION
|
||||||
|
#include "miniaudio.h"
|
||||||
|
|
||||||
|
#if defined(_MSC_VER)
|
||||||
|
#pragma warning(disable: 4244 4267) // possible loss of data
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef _WIN32
|
||||||
|
#include <fcntl.h>
|
||||||
|
#include <io.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include <cstring>
|
||||||
|
#include <fstream>
|
||||||
|
|
||||||
|
#ifdef WHISPER_FFMPEG
|
||||||
|
// as implemented in ffmpeg_trancode.cpp only embedded in common lib if whisper built with ffmpeg support
|
||||||
|
extern bool ffmpeg_decode_audio(const std::string & ifname, std::vector<uint8_t> & wav_data);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
bool read_audio_data(const std::string & fname, std::vector<float>& pcmf32, std::vector<std::vector<float>>& pcmf32s, bool stereo) {
|
||||||
|
std::vector<uint8_t> audio_data; // used for pipe input from stdin or ffmpeg decoding output
|
||||||
|
|
||||||
|
ma_result result;
|
||||||
|
ma_decoder_config decoder_config;
|
||||||
|
ma_decoder decoder;
|
||||||
|
|
||||||
|
decoder_config = ma_decoder_config_init(ma_format_f32, stereo ? 2 : 1, WHISPER_SAMPLE_RATE);
|
||||||
|
|
||||||
|
if (fname == "-") {
|
||||||
|
#ifdef _WIN32
|
||||||
|
_setmode(_fileno(stdin), _O_BINARY);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
uint8_t buf[1024];
|
||||||
|
while (true)
|
||||||
|
{
|
||||||
|
const size_t n = fread(buf, 1, sizeof(buf), stdin);
|
||||||
|
if (n == 0) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
audio_data.insert(audio_data.end(), buf, buf + n);
|
||||||
|
}
|
||||||
|
|
||||||
|
if ((result = ma_decoder_init_memory(audio_data.data(), audio_data.size(), &decoder_config, &decoder)) != MA_SUCCESS) {
|
||||||
|
|
||||||
|
fprintf(stderr, "Error: failed to open audio data from stdin (%s)\n", ma_result_description(result));
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
fprintf(stderr, "%s: read %zu bytes from stdin\n", __func__, audio_data.size());
|
||||||
|
}
|
||||||
|
else if (((result = ma_decoder_init_file(fname.c_str(), &decoder_config, &decoder)) != MA_SUCCESS)) {
|
||||||
|
#if defined(WHISPER_FFMPEG)
|
||||||
|
if (ffmpeg_decode_audio(fname, audio_data) != 0) {
|
||||||
|
fprintf(stderr, "error: failed to ffmpeg decode '%s'\n", fname.c_str());
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if ((result = ma_decoder_init_memory(audio_data.data(), audio_data.size(), &decoder_config, &decoder)) != MA_SUCCESS) {
|
||||||
|
fprintf(stderr, "error: failed to read audio data as wav (%s)\n", ma_result_description(result));
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
if ((result = ma_decoder_init_memory(fname.c_str(), fname.size(), &decoder_config, &decoder)) != MA_SUCCESS) {
|
||||||
|
fprintf(stderr, "error: failed to read audio data as wav (%s)\n", ma_result_description(result));
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
ma_uint64 frame_count;
|
||||||
|
ma_uint64 frames_read;
|
||||||
|
|
||||||
|
if ((result = ma_decoder_get_length_in_pcm_frames(&decoder, &frame_count)) != MA_SUCCESS) {
|
||||||
|
fprintf(stderr, "error: failed to retrieve the length of the audio data (%s)\n", ma_result_description(result));
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
pcmf32.resize(stereo ? frame_count*2 : frame_count);
|
||||||
|
|
||||||
|
if ((result = ma_decoder_read_pcm_frames(&decoder, pcmf32.data(), frame_count, &frames_read)) != MA_SUCCESS) {
|
||||||
|
fprintf(stderr, "error: failed to read the frames of the audio data (%s)\n", ma_result_description(result));
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (stereo) {
|
||||||
|
pcmf32s.resize(2);
|
||||||
|
pcmf32s[0].resize(frame_count);
|
||||||
|
pcmf32s[1].resize(frame_count);
|
||||||
|
for (uint64_t i = 0; i < frame_count; i++) {
|
||||||
|
pcmf32s[0][i] = pcmf32[2*i];
|
||||||
|
pcmf32s[1][i] = pcmf32[2*i + 1];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ma_decoder_uninit(&decoder);
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// 500 -> 00:05.000
|
||||||
|
// 6000 -> 01:00.000
|
||||||
|
std::string to_timestamp(int64_t t, bool comma) {
|
||||||
|
int64_t msec = t * 10;
|
||||||
|
int64_t hr = msec / (1000 * 60 * 60);
|
||||||
|
msec = msec - hr * (1000 * 60 * 60);
|
||||||
|
int64_t min = msec / (1000 * 60);
|
||||||
|
msec = msec - min * (1000 * 60);
|
||||||
|
int64_t sec = msec / 1000;
|
||||||
|
msec = msec - sec * 1000;
|
||||||
|
|
||||||
|
char buf[32];
|
||||||
|
snprintf(buf, sizeof(buf), "%02d:%02d:%02d%s%03d", (int) hr, (int) min, (int) sec, comma ? "," : ".", (int) msec);
|
||||||
|
|
||||||
|
return std::string(buf);
|
||||||
|
}
|
||||||
|
|
||||||
|
int timestamp_to_sample(int64_t t, int n_samples, int whisper_sample_rate) {
|
||||||
|
return std::max(0, std::min((int) n_samples - 1, (int) ((t*whisper_sample_rate)/100)));
|
||||||
|
}
|
||||||
|
|
||||||
|
bool speak_with_file(const std::string & command, const std::string & text, const std::string & path, int voice_id) {
|
||||||
|
std::ofstream speak_file(path.c_str());
|
||||||
|
if (speak_file.fail()) {
|
||||||
|
fprintf(stderr, "%s: failed to open speak_file\n", __func__);
|
||||||
|
return false;
|
||||||
|
} else {
|
||||||
|
speak_file.write(text.c_str(), text.size());
|
||||||
|
speak_file.close();
|
||||||
|
int ret = system((command + " " + std::to_string(voice_id) + " " + path).c_str());
|
||||||
|
if (ret != 0) {
|
||||||
|
fprintf(stderr, "%s: failed to speak\n", __func__);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
#undef STB_VORBIS_HEADER_ONLY
|
||||||
|
#include "stb_vorbis.c"
|
24
examples/common-whisper.h
Normal file
24
examples/common-whisper.h
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
#include <cstdint>
|
||||||
|
|
||||||
|
// Read WAV audio file and store the PCM data into pcmf32
|
||||||
|
// fname can be a buffer of WAV data instead of a filename
|
||||||
|
// The sample rate of the audio must be equal to COMMON_SAMPLE_RATE
|
||||||
|
// If stereo flag is set and the audio has 2 channels, the pcmf32s will contain 2 channel PCM
|
||||||
|
bool read_audio_data(
|
||||||
|
const std::string & fname,
|
||||||
|
std::vector<float> & pcmf32,
|
||||||
|
std::vector<std::vector<float>> & pcmf32s,
|
||||||
|
bool stereo);
|
||||||
|
|
||||||
|
// convert timestamp to string, 6000 -> 01:00.000
|
||||||
|
std::string to_timestamp(int64_t t, bool comma = false);
|
||||||
|
|
||||||
|
// given a timestamp get the sample
|
||||||
|
int timestamp_to_sample(int64_t t, int n_samples, int whisper_sample_rate);
|
||||||
|
|
||||||
|
// write text to file, and call system("command voice_id file")
|
||||||
|
bool speak_with_file(const std::string & command, const std::string & text, const std::string & path, int voice_id);
|
@ -2,33 +2,18 @@
|
|||||||
|
|
||||||
#include "common.h"
|
#include "common.h"
|
||||||
|
|
||||||
// third-party utilities
|
|
||||||
// use your favorite implementations
|
|
||||||
#define DR_WAV_IMPLEMENTATION
|
|
||||||
#include "dr_wav.h"
|
|
||||||
|
|
||||||
#include <cmath>
|
#include <cmath>
|
||||||
|
#include <codecvt>
|
||||||
#include <cstring>
|
#include <cstring>
|
||||||
#include <fstream>
|
#include <fstream>
|
||||||
#include <regex>
|
|
||||||
#include <locale>
|
#include <locale>
|
||||||
#include <codecvt>
|
#include <regex>
|
||||||
#include <sstream>
|
#include <sstream>
|
||||||
|
|
||||||
#if defined(_MSC_VER)
|
#if defined(_MSC_VER)
|
||||||
#pragma warning(disable: 4244 4267) // possible loss of data
|
#pragma warning(disable: 4244 4267) // possible loss of data
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifdef _WIN32
|
|
||||||
#include <fcntl.h>
|
|
||||||
#include <io.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef WHISPER_FFMPEG
|
|
||||||
// as implemented in ffmpeg_trancode.cpp only embedded in common lib if whisper built with ffmpeg support
|
|
||||||
extern bool ffmpeg_decode_audio(const std::string & ifname, std::vector<uint8_t> & wav_data);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
// Function to check if the next argument exists
|
// Function to check if the next argument exists
|
||||||
static std::string get_next_arg(int& i, int argc, char** argv, const std::string& flag, gpt_params& params) {
|
static std::string get_next_arg(int& i, int argc, char** argv, const std::string& flag, gpt_params& params) {
|
||||||
if (i + 1 < argc && argv[i + 1][0] != '-') {
|
if (i + 1 < argc && argv[i + 1][0] != '-') {
|
||||||
@ -624,129 +609,6 @@ gpt_vocab::id gpt_sample_top_k_top_p_repeat(
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bool is_wav_buffer(const std::string buf) {
|
|
||||||
// RIFF ref: https://en.wikipedia.org/wiki/Resource_Interchange_File_Format
|
|
||||||
// WAV ref: https://www.mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html
|
|
||||||
if (buf.size() < 12 || buf.substr(0, 4) != "RIFF" || buf.substr(8, 4) != "WAVE") {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
uint32_t chunk_size = *reinterpret_cast<const uint32_t*>(buf.data() + 4);
|
|
||||||
if (chunk_size + 8 != buf.size()) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool read_wav(const std::string & fname, std::vector<float>& pcmf32, std::vector<std::vector<float>>& pcmf32s, bool stereo) {
|
|
||||||
drwav wav;
|
|
||||||
std::vector<uint8_t> wav_data; // used for pipe input from stdin or ffmpeg decoding output
|
|
||||||
|
|
||||||
if (fname == "-") {
|
|
||||||
{
|
|
||||||
#ifdef _WIN32
|
|
||||||
_setmode(_fileno(stdin), _O_BINARY);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
uint8_t buf[1024];
|
|
||||||
while (true)
|
|
||||||
{
|
|
||||||
const size_t n = fread(buf, 1, sizeof(buf), stdin);
|
|
||||||
if (n == 0) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
wav_data.insert(wav_data.end(), buf, buf + n);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (drwav_init_memory(&wav, wav_data.data(), wav_data.size(), nullptr) == false) {
|
|
||||||
fprintf(stderr, "error: failed to open WAV file from stdin\n");
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
fprintf(stderr, "%s: read %zu bytes from stdin\n", __func__, wav_data.size());
|
|
||||||
}
|
|
||||||
else if (is_wav_buffer(fname)) {
|
|
||||||
if (drwav_init_memory(&wav, fname.c_str(), fname.size(), nullptr) == false) {
|
|
||||||
fprintf(stderr, "error: failed to open WAV file from fname buffer\n");
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
else if (drwav_init_file(&wav, fname.c_str(), nullptr) == false) {
|
|
||||||
#if defined(WHISPER_FFMPEG)
|
|
||||||
if (ffmpeg_decode_audio(fname, wav_data) != 0) {
|
|
||||||
fprintf(stderr, "error: failed to ffmpeg decode '%s' \n", fname.c_str());
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
if (drwav_init_memory(&wav, wav_data.data(), wav_data.size(), nullptr) == false) {
|
|
||||||
fprintf(stderr, "error: failed to read wav data as wav \n");
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
#else
|
|
||||||
fprintf(stderr, "error: failed to open '%s' as WAV file\n", fname.c_str());
|
|
||||||
return false;
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
if (wav.channels != 1 && wav.channels != 2) {
|
|
||||||
fprintf(stderr, "%s: WAV file '%s' must be mono or stereo\n", __func__, fname.c_str());
|
|
||||||
drwav_uninit(&wav);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (stereo && wav.channels != 2) {
|
|
||||||
fprintf(stderr, "%s: WAV file '%s' must be stereo for diarization\n", __func__, fname.c_str());
|
|
||||||
drwav_uninit(&wav);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (wav.sampleRate != COMMON_SAMPLE_RATE) {
|
|
||||||
fprintf(stderr, "%s: WAV file '%s' must be %i kHz\n", __func__, fname.c_str(), COMMON_SAMPLE_RATE/1000);
|
|
||||||
drwav_uninit(&wav);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (wav.bitsPerSample != 16) {
|
|
||||||
fprintf(stderr, "%s: WAV file '%s' must be 16-bit\n", __func__, fname.c_str());
|
|
||||||
drwav_uninit(&wav);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
const uint64_t n = wav_data.empty() ? wav.totalPCMFrameCount : wav_data.size()/(wav.channels*wav.bitsPerSample/8);
|
|
||||||
|
|
||||||
std::vector<int16_t> pcm16;
|
|
||||||
pcm16.resize(n*wav.channels);
|
|
||||||
drwav_read_pcm_frames_s16(&wav, n, pcm16.data());
|
|
||||||
drwav_uninit(&wav);
|
|
||||||
|
|
||||||
// convert to mono, float
|
|
||||||
pcmf32.resize(n);
|
|
||||||
if (wav.channels == 1) {
|
|
||||||
for (uint64_t i = 0; i < n; i++) {
|
|
||||||
pcmf32[i] = float(pcm16[i])/32768.0f;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
for (uint64_t i = 0; i < n; i++) {
|
|
||||||
pcmf32[i] = float(pcm16[2*i] + pcm16[2*i + 1])/65536.0f;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (stereo) {
|
|
||||||
// convert to stereo, float
|
|
||||||
pcmf32s.resize(2);
|
|
||||||
|
|
||||||
pcmf32s[0].resize(n);
|
|
||||||
pcmf32s[1].resize(n);
|
|
||||||
for (uint64_t i = 0; i < n; i++) {
|
|
||||||
pcmf32s[0][i] = float(pcm16[2*i])/32768.0f;
|
|
||||||
pcmf32s[1][i] = float(pcm16[2*i + 1])/32768.0f;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
void high_pass_filter(std::vector<float> & data, float cutoff, float sample_rate) {
|
void high_pass_filter(std::vector<float> & data, float cutoff, float sample_rate) {
|
||||||
const float rc = 1.0f / (2.0f * M_PI * cutoff);
|
const float rc = 1.0f / (2.0f * M_PI * cutoff);
|
||||||
const float dt = 1.0f / sample_rate;
|
const float dt = 1.0f / sample_rate;
|
||||||
@ -822,90 +684,7 @@ float similarity(const std::string & s0, const std::string & s1) {
|
|||||||
return 1.0f - (dist / std::max(s0.size(), s1.size()));
|
return 1.0f - (dist / std::max(s0.size(), s1.size()));
|
||||||
}
|
}
|
||||||
|
|
||||||
bool sam_params_parse(int argc, char ** argv, sam_params & params) {
|
bool is_file_exist(const char * filename) {
|
||||||
for (int i = 1; i < argc; i++) {
|
std::ifstream infile(filename);
|
||||||
std::string arg = argv[i];
|
|
||||||
|
|
||||||
if (arg == "-s" || arg == "--seed") {
|
|
||||||
params.seed = std::stoi(argv[++i]);
|
|
||||||
} else if (arg == "-t" || arg == "--threads") {
|
|
||||||
params.n_threads = std::stoi(argv[++i]);
|
|
||||||
} else if (arg == "-m" || arg == "--model") {
|
|
||||||
params.model = argv[++i];
|
|
||||||
} else if (arg == "-i" || arg == "--inp") {
|
|
||||||
params.fname_inp = argv[++i];
|
|
||||||
} else if (arg == "-o" || arg == "--out") {
|
|
||||||
params.fname_out = argv[++i];
|
|
||||||
} else if (arg == "-h" || arg == "--help") {
|
|
||||||
sam_print_usage(argc, argv, params);
|
|
||||||
exit(0);
|
|
||||||
} else {
|
|
||||||
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
|
|
||||||
sam_print_usage(argc, argv, params);
|
|
||||||
exit(0);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
void sam_print_usage(int /*argc*/, char ** argv, const sam_params & params) {
|
|
||||||
fprintf(stderr, "usage: %s [options]\n", argv[0]);
|
|
||||||
fprintf(stderr, "\n");
|
|
||||||
fprintf(stderr, "options:\n");
|
|
||||||
fprintf(stderr, " -h, --help show this help message and exit\n");
|
|
||||||
fprintf(stderr, " -s SEED, --seed SEED RNG seed (default: -1)\n");
|
|
||||||
fprintf(stderr, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads);
|
|
||||||
fprintf(stderr, " -m FNAME, --model FNAME\n");
|
|
||||||
fprintf(stderr, " model path (default: %s)\n", params.model.c_str());
|
|
||||||
fprintf(stderr, " -i FNAME, --inp FNAME\n");
|
|
||||||
fprintf(stderr, " input file (default: %s)\n", params.fname_inp.c_str());
|
|
||||||
fprintf(stderr, " -o FNAME, --out FNAME\n");
|
|
||||||
fprintf(stderr, " output file (default: %s)\n", params.fname_out.c_str());
|
|
||||||
fprintf(stderr, "\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
// 500 -> 00:05.000
|
|
||||||
// 6000 -> 01:00.000
|
|
||||||
std::string to_timestamp(int64_t t, bool comma) {
|
|
||||||
int64_t msec = t * 10;
|
|
||||||
int64_t hr = msec / (1000 * 60 * 60);
|
|
||||||
msec = msec - hr * (1000 * 60 * 60);
|
|
||||||
int64_t min = msec / (1000 * 60);
|
|
||||||
msec = msec - min * (1000 * 60);
|
|
||||||
int64_t sec = msec / 1000;
|
|
||||||
msec = msec - sec * 1000;
|
|
||||||
|
|
||||||
char buf[32];
|
|
||||||
snprintf(buf, sizeof(buf), "%02d:%02d:%02d%s%03d", (int) hr, (int) min, (int) sec, comma ? "," : ".", (int) msec);
|
|
||||||
|
|
||||||
return std::string(buf);
|
|
||||||
}
|
|
||||||
|
|
||||||
int timestamp_to_sample(int64_t t, int n_samples, int whisper_sample_rate) {
|
|
||||||
return std::max(0, std::min((int) n_samples - 1, (int) ((t*whisper_sample_rate)/100)));
|
|
||||||
}
|
|
||||||
|
|
||||||
bool is_file_exist(const char *fileName)
|
|
||||||
{
|
|
||||||
std::ifstream infile(fileName);
|
|
||||||
return infile.good();
|
return infile.good();
|
||||||
}
|
}
|
||||||
|
|
||||||
bool speak_with_file(const std::string & command, const std::string & text, const std::string & path, int voice_id)
|
|
||||||
{
|
|
||||||
std::ofstream speak_file(path.c_str());
|
|
||||||
if (speak_file.fail()) {
|
|
||||||
fprintf(stderr, "%s: failed to open speak_file\n", __func__);
|
|
||||||
return false;
|
|
||||||
} else {
|
|
||||||
speak_file.write(text.c_str(), text.size());
|
|
||||||
speak_file.close();
|
|
||||||
int ret = system((command + " " + std::to_string(voice_id) + " " + path).c_str());
|
|
||||||
if (ret != 0) {
|
|
||||||
fprintf(stderr, "%s: failed to speak\n", __func__);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
@ -11,8 +11,6 @@
|
|||||||
#include <fstream>
|
#include <fstream>
|
||||||
#include <sstream>
|
#include <sstream>
|
||||||
|
|
||||||
#define COMMON_SAMPLE_RATE 16000
|
|
||||||
|
|
||||||
//
|
//
|
||||||
// GPT CLI argument parsing
|
// GPT CLI argument parsing
|
||||||
//
|
//
|
||||||
@ -136,19 +134,6 @@ gpt_vocab::id gpt_sample_top_k_top_p_repeat(
|
|||||||
// Audio utils
|
// Audio utils
|
||||||
//
|
//
|
||||||
|
|
||||||
// Check if a buffer is a WAV audio file
|
|
||||||
bool is_wav_buffer(const std::string buf);
|
|
||||||
|
|
||||||
// Read WAV audio file and store the PCM data into pcmf32
|
|
||||||
// fname can be a buffer of WAV data instead of a filename
|
|
||||||
// The sample rate of the audio must be equal to COMMON_SAMPLE_RATE
|
|
||||||
// If stereo flag is set and the audio has 2 channels, the pcmf32s will contain 2 channel PCM
|
|
||||||
bool read_wav(
|
|
||||||
const std::string & fname,
|
|
||||||
std::vector<float> & pcmf32,
|
|
||||||
std::vector<std::vector<float>> & pcmf32s,
|
|
||||||
bool stereo);
|
|
||||||
|
|
||||||
// Write PCM data into WAV audio file
|
// Write PCM data into WAV audio file
|
||||||
class wav_writer {
|
class wav_writer {
|
||||||
private:
|
private:
|
||||||
@ -266,23 +251,6 @@ bool vad_simple(
|
|||||||
// compute similarity between two strings using Levenshtein distance
|
// compute similarity between two strings using Levenshtein distance
|
||||||
float similarity(const std::string & s0, const std::string & s1);
|
float similarity(const std::string & s0, const std::string & s1);
|
||||||
|
|
||||||
//
|
|
||||||
// SAM argument parsing
|
|
||||||
//
|
|
||||||
|
|
||||||
struct sam_params {
|
|
||||||
int32_t seed = -1; // RNG seed
|
|
||||||
int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
|
|
||||||
|
|
||||||
std::string model = "models/sam-vit-b/ggml-model-f16.bin"; // model path
|
|
||||||
std::string fname_inp = "img.jpg";
|
|
||||||
std::string fname_out = "img.out";
|
|
||||||
};
|
|
||||||
|
|
||||||
bool sam_params_parse(int argc, char ** argv, sam_params & params);
|
|
||||||
|
|
||||||
void sam_print_usage(int argc, char ** argv, const sam_params & params);
|
|
||||||
|
|
||||||
//
|
//
|
||||||
// Terminal utils
|
// Terminal utils
|
||||||
//
|
//
|
||||||
@ -330,14 +298,5 @@ const std::vector<std::string> k_colors = {
|
|||||||
// Other utils
|
// Other utils
|
||||||
//
|
//
|
||||||
|
|
||||||
// convert timestamp to string, 6000 -> 01:00.000
|
|
||||||
std::string to_timestamp(int64_t t, bool comma = false);
|
|
||||||
|
|
||||||
// given a timestamp get the sample
|
|
||||||
int timestamp_to_sample(int64_t t, int n_samples, int whisper_sample_rate);
|
|
||||||
|
|
||||||
// check if file exists using ifstream
|
// check if file exists using ifstream
|
||||||
bool is_file_exist(const char *fileName);
|
bool is_file_exist(const char * filename);
|
||||||
|
|
||||||
// write text to file, and call system("command voice_id file")
|
|
||||||
bool speak_with_file(const std::string & command, const std::string & text, const std::string & path, int voice_id);
|
|
||||||
|
4
examples/deprecation-warning/CMakeLists.txt
Normal file
4
examples/deprecation-warning/CMakeLists.txt
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
add_executable(main ./deprecation-warning.cpp)
|
||||||
|
add_executable(bench ./deprecation-warning.cpp)
|
||||||
|
add_executable(stream ./deprecation-warning.cpp)
|
||||||
|
add_executable(command ./deprecation-warning.cpp)
|
17
examples/deprecation-warning/README.md
Normal file
17
examples/deprecation-warning/README.md
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
# Migration notice for binary filenames
|
||||||
|
|
||||||
|
> [!IMPORTANT]
|
||||||
|
[2024 Dec 20] Binaries have been renamed w/ a `whisper-` prefix. `main` is now `whisper-cli`, `server` is `whisper-server`, etc (https://github.com/ggerganov/whisper.cpp/pull/2648)
|
||||||
|
|
||||||
|
This migration was important, but it is a breaking change that may not always be immediately obvious to users.
|
||||||
|
|
||||||
|
Please update all scripts and workflows to use the new binary names.
|
||||||
|
|
||||||
|
| Old Filename | New Filename |
|
||||||
|
| ---- | ---- |
|
||||||
|
| main | whisper-cli |
|
||||||
|
| bench | whisper-bench |
|
||||||
|
| stream | whisper-stream |
|
||||||
|
| command | whisper-command |
|
||||||
|
| server | whisper-server |
|
||||||
|
| talk-llama | whisper-talk-llama |
|
38
examples/deprecation-warning/deprecation-warning.cpp
Normal file
38
examples/deprecation-warning/deprecation-warning.cpp
Normal file
@ -0,0 +1,38 @@
|
|||||||
|
// Warns users that this filename was deprecated, and provides a link for more information.
|
||||||
|
|
||||||
|
#include <cstdio>
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
// Main
|
||||||
|
int main(int argc, char** argv) {
|
||||||
|
std::string filename = "main";
|
||||||
|
if (argc >= 1) {
|
||||||
|
filename = argv[0];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get only the program name from the full path
|
||||||
|
size_t pos = filename.find_last_of("/\\");
|
||||||
|
if (pos != std::string::npos) {
|
||||||
|
filename = filename.substr(pos+1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Append "whisper-" to the beginning of filename to get the replacemnt filename
|
||||||
|
std::string replacement_filename = "whisper-" + filename;
|
||||||
|
|
||||||
|
// The exception is if the filename is "main", then our replacement filename is "whisper-cli"
|
||||||
|
if (filename == "main") {
|
||||||
|
replacement_filename = "whisper-cli";
|
||||||
|
}
|
||||||
|
|
||||||
|
if (filename == "main.exe") {
|
||||||
|
replacement_filename = "whisper-cli.exe";
|
||||||
|
}
|
||||||
|
|
||||||
|
fprintf(stdout, "\n");
|
||||||
|
fprintf(stdout, "WARNING: The binary '%s' is deprecated.\n", filename.c_str());
|
||||||
|
fprintf(stdout, " Please use '%s' instead.\n", replacement_filename.c_str());
|
||||||
|
fprintf(stdout, " See https://github.com/ggerganov/whisper.cpp/tree/master/examples/deprecation-warning/README.md for more information.\n");
|
||||||
|
fprintf(stdout, "\n");
|
||||||
|
|
||||||
|
return EXIT_FAILURE;
|
||||||
|
}
|
8815
examples/dr_wav.h
8815
examples/dr_wav.h
File diff suppressed because it is too large
Load Diff
@ -11,7 +11,7 @@
|
|||||||
# Press Ctrl+C to stop recording
|
# Press Ctrl+C to stop recording
|
||||||
#
|
#
|
||||||
|
|
||||||
executable="./main"
|
executable="./build/bin/whisper-cli"
|
||||||
model="base.en"
|
model="base.en"
|
||||||
model_path="models/ggml-$model.bin"
|
model_path="models/ggml-$model.bin"
|
||||||
|
|
||||||
@ -41,20 +41,17 @@ fi
|
|||||||
# record some raw audio
|
# record some raw audio
|
||||||
sox -d rec.wav
|
sox -d rec.wav
|
||||||
|
|
||||||
# resample to 16kHz
|
|
||||||
ffmpeg -y -i ./rec.wav -ar 16000 -ac 1 -c:a pcm_s16le ./rec16.wav > /dev/null 2>&1
|
|
||||||
|
|
||||||
# run Whisper
|
# run Whisper
|
||||||
echo "Processing ..."
|
echo "Processing ..."
|
||||||
./main -m models/ggml-base.en.bin rec16.wav -owts > /dev/null 2>&1
|
${executable} -m models/ggml-base.en.bin rec.wav -owts > /dev/null 2>&1
|
||||||
|
|
||||||
# generate Karaoke video
|
# generate Karaoke video
|
||||||
echo "Generating video ..."
|
echo "Generating video ..."
|
||||||
source rec16.wav.wts > /dev/null 2>&1
|
source rec.wav.wts > /dev/null 2>&1
|
||||||
|
|
||||||
# play the video
|
# play the video
|
||||||
echo "Playing ./rec16.wav.mp4 ..."
|
echo "Playing ./rec16.wav.mp4 ..."
|
||||||
ffplay -loglevel 0 -autoexit ./rec16.wav.mp4
|
ffplay -loglevel 0 -autoexit ./rec.wav.mp4
|
||||||
|
|
||||||
echo "Done"
|
echo "Done"
|
||||||
exit 0
|
exit 0
|
||||||
|
@ -14,7 +14,7 @@ model="base.en"
|
|||||||
|
|
||||||
check_requirements()
|
check_requirements()
|
||||||
{
|
{
|
||||||
if ! command -v ./main &>/dev/null; then
|
if ! command -v ./build/bin/whisper-cli &>/dev/null; then
|
||||||
echo "whisper.cpp main executable is required (make)"
|
echo "whisper.cpp main executable is required (make)"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
@ -100,7 +100,7 @@ while [ $running -eq 1 ]; do
|
|||||||
err=$(cat /tmp/whisper-live.err | wc -l)
|
err=$(cat /tmp/whisper-live.err | wc -l)
|
||||||
done
|
done
|
||||||
|
|
||||||
./main -t 8 -m ./models/ggml-${model}.bin -f /tmp/whisper-live.wav --no-timestamps -otxt 2> /tmp/whispererr | tail -n 1
|
./build/bin/whisper-cli -t 8 -m ./models/ggml-${model}.bin -f /tmp/whisper-live.wav --no-timestamps -otxt 2> /tmp/whispererr | tail -n 1
|
||||||
|
|
||||||
while [ $SECONDS -lt $((($i+1)*$step_s)) ]; do
|
while [ $SECONDS -lt $((($i+1)*$step_s)) ]; do
|
||||||
sleep 1
|
sleep 1
|
||||||
@ -109,4 +109,4 @@ while [ $running -eq 1 ]; do
|
|||||||
done
|
done
|
||||||
|
|
||||||
killall -v ffmpeg
|
killall -v ffmpeg
|
||||||
killall -v main
|
killall -v whisper-cli
|
||||||
|
@ -3,14 +3,15 @@
|
|||||||
#include "whisper.h"
|
#include "whisper.h"
|
||||||
#include "json.hpp"
|
#include "json.hpp"
|
||||||
|
|
||||||
#include <iostream>
|
|
||||||
#include <cassert>
|
#include <cassert>
|
||||||
|
#include <chrono>
|
||||||
#include <cstdio>
|
#include <cstdio>
|
||||||
|
#include <deque>
|
||||||
|
#include <iostream>
|
||||||
|
#include <set>
|
||||||
#include <string>
|
#include <string>
|
||||||
#include <thread>
|
#include <thread>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
#include <deque>
|
|
||||||
#include <set>
|
|
||||||
|
|
||||||
using json = nlohmann::json;
|
using json = nlohmann::json;
|
||||||
|
|
||||||
@ -181,7 +182,7 @@ static json unguided_transcription(struct whisper_context * ctx, audio_async &au
|
|||||||
wparams.n_threads = params.n_threads;
|
wparams.n_threads = params.n_threads;
|
||||||
|
|
||||||
wparams.audio_ctx = params.audio_ctx;
|
wparams.audio_ctx = params.audio_ctx;
|
||||||
wparams.suppress_non_speech_tokens = true;
|
wparams.suppress_nst = true;
|
||||||
// run the transformer and a single decoding pass
|
// run the transformer and a single decoding pass
|
||||||
if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
|
if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
|
||||||
fprintf(stderr, "%s: ERROR: whisper_full() failed\n", __func__);
|
fprintf(stderr, "%s: ERROR: whisper_full() failed\n", __func__);
|
||||||
@ -225,7 +226,7 @@ static json guided_transcription(struct whisper_context * ctx, audio_async &audi
|
|||||||
wparams.prompt_tokens = cs.prompt_tokens.data();
|
wparams.prompt_tokens = cs.prompt_tokens.data();
|
||||||
wparams.prompt_n_tokens = cs.prompt_tokens.size();
|
wparams.prompt_n_tokens = cs.prompt_tokens.size();
|
||||||
// TODO: properly expose as option
|
// TODO: properly expose as option
|
||||||
wparams.suppress_non_speech_tokens = true;
|
wparams.suppress_nst = true;
|
||||||
|
|
||||||
// run the transformer and a single decoding pass
|
// run the transformer and a single decoding pass
|
||||||
if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
|
if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
|
||||||
|
93468
examples/miniaudio.h
Normal file
93468
examples/miniaudio.h
Normal file
File diff suppressed because it is too large
Load Diff
@ -1,4 +1,4 @@
|
|||||||
set(TARGET server)
|
set(TARGET whisper-server)
|
||||||
add_executable(${TARGET} server.cpp httplib.h)
|
add_executable(${TARGET} server.cpp httplib.h)
|
||||||
|
|
||||||
include(DefaultTargetOptions)
|
include(DefaultTargetOptions)
|
||||||
@ -8,3 +8,5 @@ target_link_libraries(${TARGET} PRIVATE common json_cpp whisper ${CMAKE_THREAD_L
|
|||||||
if (WIN32)
|
if (WIN32)
|
||||||
target_link_libraries(${TARGET} PRIVATE ws2_32)
|
target_link_libraries(${TARGET} PRIVATE ws2_32)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
install(TARGETS ${TARGET} RUNTIME)
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# whisper.cpp http server
|
# whisper.cpp/examples/server
|
||||||
|
|
||||||
Simple http server. WAV Files are passed to the inference model via http requests.
|
Simple http server. WAV Files are passed to the inference model via http requests.
|
||||||
|
|
||||||
@ -7,9 +7,9 @@ https://github.com/ggerganov/whisper.cpp/assets/1991296/e983ee53-8741-4eb5-9048-
|
|||||||
## Usage
|
## Usage
|
||||||
|
|
||||||
```
|
```
|
||||||
./server -h
|
./build/bin/whisper-server -h
|
||||||
|
|
||||||
usage: ./bin/server [options]
|
usage: ./build/bin/whisper-server [options]
|
||||||
|
|
||||||
options:
|
options:
|
||||||
-h, --help [default] show this help message and exit
|
-h, --help [default] show this help message and exit
|
||||||
|
@ -1,17 +1,18 @@
|
|||||||
#include "common.h"
|
#include "common.h"
|
||||||
|
#include "common-whisper.h"
|
||||||
|
|
||||||
#include "whisper.h"
|
#include "whisper.h"
|
||||||
#include "httplib.h"
|
#include "httplib.h"
|
||||||
#include "json.hpp"
|
#include "json.hpp"
|
||||||
|
|
||||||
|
#include <chrono>
|
||||||
#include <cmath>
|
#include <cmath>
|
||||||
#include <fstream>
|
|
||||||
#include <cstdio>
|
#include <cstdio>
|
||||||
|
#include <fstream>
|
||||||
|
#include <sstream>
|
||||||
#include <string>
|
#include <string>
|
||||||
#include <thread>
|
#include <thread>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
#include <cstring>
|
|
||||||
#include <sstream>
|
|
||||||
|
|
||||||
#if defined(_MSC_VER)
|
#if defined(_MSC_VER)
|
||||||
#pragma warning(disable: 4244 4267) // possible loss of data
|
#pragma warning(disable: 4244 4267) // possible loss of data
|
||||||
@ -61,6 +62,7 @@ struct whisper_params {
|
|||||||
float logprob_thold = -1.00f;
|
float logprob_thold = -1.00f;
|
||||||
float temperature = 0.00f;
|
float temperature = 0.00f;
|
||||||
float temperature_inc = 0.20f;
|
float temperature_inc = 0.20f;
|
||||||
|
float no_speech_thold = 0.6f;
|
||||||
|
|
||||||
bool debug_mode = false;
|
bool debug_mode = false;
|
||||||
bool translate = false;
|
bool translate = false;
|
||||||
@ -76,6 +78,7 @@ struct whisper_params {
|
|||||||
bool no_timestamps = false;
|
bool no_timestamps = false;
|
||||||
bool use_gpu = true;
|
bool use_gpu = true;
|
||||||
bool flash_attn = false;
|
bool flash_attn = false;
|
||||||
|
bool suppress_nst = false;
|
||||||
|
|
||||||
std::string language = "en";
|
std::string language = "en";
|
||||||
std::string prompt = "";
|
std::string prompt = "";
|
||||||
@ -134,7 +137,9 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
|
|||||||
fprintf(stderr, " --public PATH, [%-7s] Path to the public folder\n", sparams.public_path.c_str());
|
fprintf(stderr, " --public PATH, [%-7s] Path to the public folder\n", sparams.public_path.c_str());
|
||||||
fprintf(stderr, " --request-path PATH, [%-7s] Request path for all requests\n", sparams.request_path.c_str());
|
fprintf(stderr, " --request-path PATH, [%-7s] Request path for all requests\n", sparams.request_path.c_str());
|
||||||
fprintf(stderr, " --inference-path PATH, [%-7s] Inference path for all requests\n", sparams.inference_path.c_str());
|
fprintf(stderr, " --inference-path PATH, [%-7s] Inference path for all requests\n", sparams.inference_path.c_str());
|
||||||
fprintf(stderr, " --convert, [%-7s] Convert audio to WAV, requires ffmpeg on the server", sparams.ffmpeg_converter ? "true" : "false");
|
fprintf(stderr, " --convert, [%-7s] Convert audio to WAV, requires ffmpeg on the server\n", sparams.ffmpeg_converter ? "true" : "false");
|
||||||
|
fprintf(stderr, " -sns, --suppress-nst [%-7s] suppress non-speech tokens\n", params.suppress_nst ? "true" : "false");
|
||||||
|
fprintf(stderr, " -nth N, --no-speech-thold N [%-7.2f] no speech threshold\n", params.no_speech_thold);
|
||||||
fprintf(stderr, "\n");
|
fprintf(stderr, "\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -179,6 +184,9 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params, serve
|
|||||||
else if (arg == "-dtw" || arg == "--dtw") { params.dtw = argv[++i]; }
|
else if (arg == "-dtw" || arg == "--dtw") { params.dtw = argv[++i]; }
|
||||||
else if (arg == "-ng" || arg == "--no-gpu") { params.use_gpu = false; }
|
else if (arg == "-ng" || arg == "--no-gpu") { params.use_gpu = false; }
|
||||||
else if (arg == "-fa" || arg == "--flash-attn") { params.flash_attn = true; }
|
else if (arg == "-fa" || arg == "--flash-attn") { params.flash_attn = true; }
|
||||||
|
else if (arg == "-sns" || arg == "--suppress-nst") { params.suppress_nst = true; }
|
||||||
|
else if (arg == "-nth" || arg == "--no-speech-thold") { params.no_speech_thold = std::stof(argv[++i]); }
|
||||||
|
|
||||||
// server params
|
// server params
|
||||||
else if ( arg == "--port") { sparams.port = std::stoi(argv[++i]); }
|
else if ( arg == "--port") { sparams.port = std::stoi(argv[++i]); }
|
||||||
else if ( arg == "--host") { sparams.hostname = argv[++i]; }
|
else if ( arg == "--host") { sparams.hostname = argv[++i]; }
|
||||||
@ -216,6 +224,24 @@ void check_ffmpeg_availibility() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
std::string generate_temp_filename(const std::string &prefix, const std::string &extension) {
|
||||||
|
auto now = std::chrono::system_clock::now();
|
||||||
|
auto now_time_t = std::chrono::system_clock::to_time_t(now);
|
||||||
|
|
||||||
|
static std::mt19937 rng{std::random_device{}()};
|
||||||
|
std::uniform_int_distribution<long long> dist(0, 1e9);
|
||||||
|
|
||||||
|
std::stringstream ss;
|
||||||
|
ss << prefix
|
||||||
|
<< "-"
|
||||||
|
<< std::put_time(std::localtime(&now_time_t), "%Y%m%d-%H%M%S")
|
||||||
|
<< "-"
|
||||||
|
<< dist(rng)
|
||||||
|
<< extension;
|
||||||
|
|
||||||
|
return ss.str();
|
||||||
|
}
|
||||||
|
|
||||||
bool convert_to_wav(const std::string & temp_filename, std::string & error_resp) {
|
bool convert_to_wav(const std::string & temp_filename, std::string & error_resp) {
|
||||||
std::ostringstream cmd_stream;
|
std::ostringstream cmd_stream;
|
||||||
std::string converted_filename_temp = temp_filename + "_temp.wav";
|
std::string converted_filename_temp = temp_filename + "_temp.wav";
|
||||||
@ -472,6 +498,14 @@ void get_req_parameters(const Request & req, whisper_params & params)
|
|||||||
{
|
{
|
||||||
params.temperature_inc = std::stof(req.get_file_value("temperature_inc").content);
|
params.temperature_inc = std::stof(req.get_file_value("temperature_inc").content);
|
||||||
}
|
}
|
||||||
|
if (req.has_file("suppress_non_speech"))
|
||||||
|
{
|
||||||
|
params.suppress_nst = parse_str_to_bool(req.get_file_value("suppress_non_speech").content);
|
||||||
|
}
|
||||||
|
if (req.has_file("suppress_nst"))
|
||||||
|
{
|
||||||
|
params.suppress_nst = parse_str_to_bool(req.get_file_value("suppress_nst").content);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace
|
} // namespace
|
||||||
@ -677,8 +711,7 @@ int main(int argc, char ** argv) {
|
|||||||
if (sparams.ffmpeg_converter) {
|
if (sparams.ffmpeg_converter) {
|
||||||
// if file is not wav, convert to wav
|
// if file is not wav, convert to wav
|
||||||
// write to temporary file
|
// write to temporary file
|
||||||
const std::string temp_filename_base = std::tmpnam(nullptr);
|
const std::string temp_filename = generate_temp_filename("whisper-server", ".wav");
|
||||||
const std::string temp_filename = temp_filename_base + ".wav";
|
|
||||||
std::ofstream temp_file{temp_filename, std::ios::binary};
|
std::ofstream temp_file{temp_filename, std::ios::binary};
|
||||||
temp_file << audio_file.content;
|
temp_file << audio_file.content;
|
||||||
temp_file.close();
|
temp_file.close();
|
||||||
@ -690,8 +723,8 @@ int main(int argc, char ** argv) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// read wav content into pcmf32
|
// read audio content into pcmf32
|
||||||
if (!::read_wav(temp_filename, pcmf32, pcmf32s, params.diarize))
|
if (!::read_audio_data(temp_filename, pcmf32, pcmf32s, params.diarize))
|
||||||
{
|
{
|
||||||
fprintf(stderr, "error: failed to read WAV file '%s'\n", temp_filename.c_str());
|
fprintf(stderr, "error: failed to read WAV file '%s'\n", temp_filename.c_str());
|
||||||
const std::string error_resp = "{\"error\":\"failed to read WAV file\"}";
|
const std::string error_resp = "{\"error\":\"failed to read WAV file\"}";
|
||||||
@ -702,16 +735,15 @@ int main(int argc, char ** argv) {
|
|||||||
// remove temp file
|
// remove temp file
|
||||||
std::remove(temp_filename.c_str());
|
std::remove(temp_filename.c_str());
|
||||||
} else {
|
} else {
|
||||||
if (!::read_wav(audio_file.content, pcmf32, pcmf32s, params.diarize))
|
if (!::read_audio_data(audio_file.content, pcmf32, pcmf32s, params.diarize))
|
||||||
{
|
{
|
||||||
fprintf(stderr, "error: failed to read WAV file\n");
|
fprintf(stderr, "error: failed to read audio data\n");
|
||||||
const std::string error_resp = "{\"error\":\"failed to read WAV file\"}";
|
const std::string error_resp = "{\"error\":\"failed to read audio data\"}";
|
||||||
res.set_content(error_resp, "application/json");
|
res.set_content(error_resp, "application/json");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
printf("Successfully loaded %s\n", filename.c_str());
|
printf("Successfully loaded %s\n", filename.c_str());
|
||||||
|
|
||||||
// print system information
|
// print system information
|
||||||
@ -779,6 +811,7 @@ int main(int argc, char ** argv) {
|
|||||||
wparams.beam_search.beam_size = params.beam_size;
|
wparams.beam_search.beam_size = params.beam_size;
|
||||||
|
|
||||||
wparams.temperature = params.temperature;
|
wparams.temperature = params.temperature;
|
||||||
|
wparams.no_speech_thold = params.no_speech_thold;
|
||||||
wparams.temperature_inc = params.temperature_inc;
|
wparams.temperature_inc = params.temperature_inc;
|
||||||
wparams.entropy_thold = params.entropy_thold;
|
wparams.entropy_thold = params.entropy_thold;
|
||||||
wparams.logprob_thold = params.logprob_thold;
|
wparams.logprob_thold = params.logprob_thold;
|
||||||
@ -786,6 +819,8 @@ int main(int argc, char ** argv) {
|
|||||||
wparams.no_timestamps = params.no_timestamps;
|
wparams.no_timestamps = params.no_timestamps;
|
||||||
wparams.token_timestamps = !params.no_timestamps && params.response_format == vjson_format;
|
wparams.token_timestamps = !params.no_timestamps && params.response_format == vjson_format;
|
||||||
|
|
||||||
|
wparams.suppress_nst = params.suppress_nst;
|
||||||
|
|
||||||
whisper_print_user_data user_data = { ¶ms, &pcmf32s, 0 };
|
whisper_print_user_data user_data = { ¶ms, &pcmf32s, 0 };
|
||||||
|
|
||||||
// this callback is called on each new segment
|
// this callback is called on each new segment
|
||||||
@ -929,7 +964,7 @@ int main(int argc, char ** argv) {
|
|||||||
|
|
||||||
// TODO compression_ratio and no_speech_prob are not implemented yet
|
// TODO compression_ratio and no_speech_prob are not implemented yet
|
||||||
// segment["compression_ratio"] = 0;
|
// segment["compression_ratio"] = 0;
|
||||||
// segment["no_speech_prob"] = 0;
|
segment["no_speech_prob"] = whisper_full_get_segment_no_speech_prob(ctx, i);
|
||||||
|
|
||||||
jres["segments"].push_back(segment);
|
jres["segments"].push_back(segment);
|
||||||
}
|
}
|
||||||
|
5584
examples/stb_vorbis.c
Normal file
5584
examples/stb_vorbis.c
Normal file
File diff suppressed because it is too large
Load Diff
@ -1,9 +1,10 @@
|
|||||||
if (WHISPER_SDL2)
|
if (WHISPER_SDL2)
|
||||||
# stream
|
set(TARGET whisper-stream)
|
||||||
set(TARGET stream)
|
|
||||||
add_executable(${TARGET} stream.cpp)
|
add_executable(${TARGET} stream.cpp)
|
||||||
|
|
||||||
include(DefaultTargetOptions)
|
include(DefaultTargetOptions)
|
||||||
|
|
||||||
target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${CMAKE_THREAD_LIBS_INIT})
|
target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${CMAKE_THREAD_LIBS_INIT})
|
||||||
|
|
||||||
|
install(TARGETS ${TARGET} RUNTIME)
|
||||||
endif ()
|
endif ()
|
||||||
|
@ -1,11 +1,11 @@
|
|||||||
# stream
|
# whisper.cpp/examples/stream
|
||||||
|
|
||||||
This is a naive example of performing real-time inference on audio from your microphone.
|
This is a naive example of performing real-time inference on audio from your microphone.
|
||||||
The `stream` tool samples the audio every half a second and runs the transcription continously.
|
The `whisper-stream` tool samples the audio every half a second and runs the transcription continously.
|
||||||
More info is available in [issue #10](https://github.com/ggerganov/whisper.cpp/issues/10).
|
More info is available in [issue #10](https://github.com/ggerganov/whisper.cpp/issues/10).
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./build/bin/stream -m ./models/ggml-base.en.bin -t 8 --step 500 --length 5000
|
./build/bin/whisper-stream -m ./models/ggml-base.en.bin -t 8 --step 500 --length 5000
|
||||||
```
|
```
|
||||||
|
|
||||||
https://user-images.githubusercontent.com/1991296/194935793-76afede7-cfa8-48d8-a80f-28ba83be7d09.mp4
|
https://user-images.githubusercontent.com/1991296/194935793-76afede7-cfa8-48d8-a80f-28ba83be7d09.mp4
|
||||||
@ -15,7 +15,7 @@ https://user-images.githubusercontent.com/1991296/194935793-76afede7-cfa8-48d8-a
|
|||||||
Setting the `--step` argument to `0` enables the sliding window mode:
|
Setting the `--step` argument to `0` enables the sliding window mode:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./build/bin/stream -m ./models/ggml-base.en.bin -t 6 --step 0 --length 30000 -vth 0.6
|
./build/bin/whisper-stream -m ./models/ggml-base.en.bin -t 6 --step 0 --length 30000 -vth 0.6
|
||||||
```
|
```
|
||||||
|
|
||||||
In this mode, the tool will transcribe only after some speech activity is detected. A very
|
In this mode, the tool will transcribe only after some speech activity is detected. A very
|
||||||
@ -27,7 +27,7 @@ a transcription block that is suitable for parsing.
|
|||||||
|
|
||||||
## Building
|
## Building
|
||||||
|
|
||||||
The `stream` tool depends on SDL2 library to capture audio from the microphone. You can build it like this:
|
The `whisper-stream` tool depends on SDL2 library to capture audio from the microphone. You can build it like this:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Install SDL2
|
# Install SDL2
|
||||||
@ -43,7 +43,7 @@ brew install sdl2
|
|||||||
cmake -B build -DWHISPER_SDL2=ON
|
cmake -B build -DWHISPER_SDL2=ON
|
||||||
cmake --build build --config Release
|
cmake --build build --config Release
|
||||||
|
|
||||||
./build/bin/stream
|
./build/bin/whisper-stream
|
||||||
```
|
```
|
||||||
|
|
||||||
## Web version
|
## Web version
|
||||||
|
@ -4,15 +4,15 @@
|
|||||||
//
|
//
|
||||||
#include "common-sdl.h"
|
#include "common-sdl.h"
|
||||||
#include "common.h"
|
#include "common.h"
|
||||||
|
#include "common-whisper.h"
|
||||||
#include "whisper.h"
|
#include "whisper.h"
|
||||||
|
|
||||||
#include <cassert>
|
#include <chrono>
|
||||||
#include <cstdio>
|
#include <cstdio>
|
||||||
|
#include <fstream>
|
||||||
#include <string>
|
#include <string>
|
||||||
#include <thread>
|
#include <thread>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
#include <fstream>
|
|
||||||
|
|
||||||
|
|
||||||
// command-line parameters
|
// command-line parameters
|
||||||
struct whisper_params {
|
struct whisper_params {
|
||||||
@ -23,6 +23,7 @@ struct whisper_params {
|
|||||||
int32_t capture_id = -1;
|
int32_t capture_id = -1;
|
||||||
int32_t max_tokens = 32;
|
int32_t max_tokens = 32;
|
||||||
int32_t audio_ctx = 0;
|
int32_t audio_ctx = 0;
|
||||||
|
int32_t beam_size = -1;
|
||||||
|
|
||||||
float vad_thold = 0.6f;
|
float vad_thold = 0.6f;
|
||||||
float freq_thold = 100.0f;
|
float freq_thold = 100.0f;
|
||||||
@ -59,6 +60,7 @@ static bool whisper_params_parse(int argc, char ** argv, whisper_params & params
|
|||||||
else if (arg == "-c" || arg == "--capture") { params.capture_id = std::stoi(argv[++i]); }
|
else if (arg == "-c" || arg == "--capture") { params.capture_id = std::stoi(argv[++i]); }
|
||||||
else if (arg == "-mt" || arg == "--max-tokens") { params.max_tokens = std::stoi(argv[++i]); }
|
else if (arg == "-mt" || arg == "--max-tokens") { params.max_tokens = std::stoi(argv[++i]); }
|
||||||
else if (arg == "-ac" || arg == "--audio-ctx") { params.audio_ctx = std::stoi(argv[++i]); }
|
else if (arg == "-ac" || arg == "--audio-ctx") { params.audio_ctx = std::stoi(argv[++i]); }
|
||||||
|
else if (arg == "-bs" || arg == "--beam-size") { params.beam_size = std::stoi(argv[++i]); }
|
||||||
else if (arg == "-vth" || arg == "--vad-thold") { params.vad_thold = std::stof(argv[++i]); }
|
else if (arg == "-vth" || arg == "--vad-thold") { params.vad_thold = std::stof(argv[++i]); }
|
||||||
else if (arg == "-fth" || arg == "--freq-thold") { params.freq_thold = std::stof(argv[++i]); }
|
else if (arg == "-fth" || arg == "--freq-thold") { params.freq_thold = std::stof(argv[++i]); }
|
||||||
else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
|
else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
|
||||||
@ -96,6 +98,7 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
|
|||||||
fprintf(stderr, " -c ID, --capture ID [%-7d] capture device ID\n", params.capture_id);
|
fprintf(stderr, " -c ID, --capture ID [%-7d] capture device ID\n", params.capture_id);
|
||||||
fprintf(stderr, " -mt N, --max-tokens N [%-7d] maximum number of tokens per audio chunk\n", params.max_tokens);
|
fprintf(stderr, " -mt N, --max-tokens N [%-7d] maximum number of tokens per audio chunk\n", params.max_tokens);
|
||||||
fprintf(stderr, " -ac N, --audio-ctx N [%-7d] audio context size (0 - all)\n", params.audio_ctx);
|
fprintf(stderr, " -ac N, --audio-ctx N [%-7d] audio context size (0 - all)\n", params.audio_ctx);
|
||||||
|
fprintf(stderr, " -bs N, --beam-size N [%-7d] beam size for beam search\n", params.beam_size);
|
||||||
fprintf(stderr, " -vth N, --vad-thold N [%-7.2f] voice activity detection threshold\n", params.vad_thold);
|
fprintf(stderr, " -vth N, --vad-thold N [%-7.2f] voice activity detection threshold\n", params.vad_thold);
|
||||||
fprintf(stderr, " -fth N, --freq-thold N [%-7.2f] high-pass frequency cutoff\n", params.freq_thold);
|
fprintf(stderr, " -fth N, --freq-thold N [%-7.2f] high-pass frequency cutoff\n", params.freq_thold);
|
||||||
fprintf(stderr, " -tr, --translate [%-7s] translate from source language to english\n", params.translate ? "true" : "false");
|
fprintf(stderr, " -tr, --translate [%-7s] translate from source language to english\n", params.translate ? "true" : "false");
|
||||||
@ -241,6 +244,11 @@ int main(int argc, char ** argv) {
|
|||||||
|
|
||||||
if (!use_vad) {
|
if (!use_vad) {
|
||||||
while (true) {
|
while (true) {
|
||||||
|
// handle Ctrl + C
|
||||||
|
is_running = sdl_poll_events();
|
||||||
|
if (!is_running) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
audio.get(params.step_ms, pcmf32_new);
|
audio.get(params.step_ms, pcmf32_new);
|
||||||
|
|
||||||
if ((int) pcmf32_new.size() > 2*n_samples_step) {
|
if ((int) pcmf32_new.size() > 2*n_samples_step) {
|
||||||
@ -298,7 +306,7 @@ int main(int argc, char ** argv) {
|
|||||||
|
|
||||||
// run the inference
|
// run the inference
|
||||||
{
|
{
|
||||||
whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
|
whisper_full_params wparams = whisper_full_default_params(params.beam_size > 1 ? WHISPER_SAMPLING_BEAM_SEARCH : WHISPER_SAMPLING_GREEDY);
|
||||||
|
|
||||||
wparams.print_progress = false;
|
wparams.print_progress = false;
|
||||||
wparams.print_special = params.print_special;
|
wparams.print_special = params.print_special;
|
||||||
@ -309,6 +317,7 @@ int main(int argc, char ** argv) {
|
|||||||
wparams.max_tokens = params.max_tokens;
|
wparams.max_tokens = params.max_tokens;
|
||||||
wparams.language = params.language.c_str();
|
wparams.language = params.language.c_str();
|
||||||
wparams.n_threads = params.n_threads;
|
wparams.n_threads = params.n_threads;
|
||||||
|
wparams.beam_search.beam_size = params.beam_size;
|
||||||
|
|
||||||
wparams.audio_ctx = params.audio_ctx;
|
wparams.audio_ctx = params.audio_ctx;
|
||||||
|
|
||||||
|
@ -1,19 +1,31 @@
|
|||||||
if (WHISPER_SDL2)
|
if (WHISPER_SDL2)
|
||||||
# talk-llama
|
set(CMAKE_CXX_STANDARD 17)
|
||||||
set(TARGET talk-llama)
|
set(CMAKE_CXX_STANDARD_REQUIRED ON)
|
||||||
|
|
||||||
|
set(TARGET whisper-talk-llama)
|
||||||
add_executable(${TARGET} talk-llama.cpp
|
add_executable(${TARGET} talk-llama.cpp
|
||||||
llama.cpp
|
llama.cpp
|
||||||
llama-vocab.cpp
|
llama-adapter.cpp
|
||||||
|
llama-arch.cpp
|
||||||
|
llama-batch.cpp
|
||||||
|
llama-chat.cpp
|
||||||
|
llama-context.cpp
|
||||||
|
llama-cparams.cpp
|
||||||
llama-grammar.cpp
|
llama-grammar.cpp
|
||||||
|
llama-hparams.cpp
|
||||||
|
llama-impl.cpp
|
||||||
|
llama-kv-cache.cpp
|
||||||
|
llama-mmap.cpp
|
||||||
|
llama-model-loader.cpp
|
||||||
|
llama-model.cpp
|
||||||
|
llama-quant.cpp
|
||||||
llama-sampling.cpp
|
llama-sampling.cpp
|
||||||
|
llama-vocab.cpp
|
||||||
unicode.cpp
|
unicode.cpp
|
||||||
unicode-data.cpp)
|
unicode-data.cpp)
|
||||||
target_include_directories(${TARGET} PRIVATE ${SDL2_INCLUDE_DIRS})
|
target_include_directories(${TARGET} PRIVATE ${SDL2_INCLUDE_DIRS})
|
||||||
|
|
||||||
if (WHISPER_CLBLAST)
|
target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${SDL2_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
|
||||||
set(CLBLAST_LIBNAME clblast)
|
|
||||||
endif ()
|
|
||||||
target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${SDL2_LIBRARIES} ${CLBLAST_LIBNAME} ${CMAKE_THREAD_LIBS_INIT})
|
|
||||||
|
|
||||||
if(WIN32)
|
if(WIN32)
|
||||||
# It requires Windows 8.1 or later for PrefetchVirtualMemory
|
# It requires Windows 8.1 or later for PrefetchVirtualMemory
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# talk-llama
|
# whisper.cpp/examples/talk-llama
|
||||||
|
|
||||||
Talk with an LLaMA AI in your terminal
|
Talk with an LLaMA AI in your terminal
|
||||||
|
|
||||||
@ -12,7 +12,7 @@ https://github.com/ggerganov/whisper.cpp/assets/1991296/d97a3788-bf2a-4756-9a43-
|
|||||||
|
|
||||||
## Building
|
## Building
|
||||||
|
|
||||||
The `talk-llama` tool depends on SDL2 library to capture audio from the microphone. You can build it like this:
|
The `whisper-talk-llama` tool depends on SDL2 library to capture audio from the microphone. You can build it like this:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Install SDL2
|
# Install SDL2
|
||||||
@ -25,11 +25,12 @@ sudo dnf install SDL2 SDL2-devel
|
|||||||
# Install SDL2 on Mac OS
|
# Install SDL2 on Mac OS
|
||||||
brew install sdl2
|
brew install sdl2
|
||||||
|
|
||||||
# Build the "talk-llama" executable
|
# Build the "whisper-talk-llama" executable
|
||||||
make talk-llama
|
cmake -B build -S . -DWHISPER_SDL2=ON
|
||||||
|
cmake --build build --config Release
|
||||||
|
|
||||||
# Run it
|
# Run it
|
||||||
./talk-llama -mw ./models/ggml-small.en.bin -ml ../llama.cpp/models/llama-13b/ggml-model-q4_0.gguf -p "Georgi" -t 8
|
./build/bin/whisper-talk-llama -mw ./models/ggml-small.en.bin -ml ../llama.cpp/models/llama-13b/ggml-model-q4_0.gguf -p "Georgi" -t 8
|
||||||
```
|
```
|
||||||
|
|
||||||
- The `-mw` argument specifies the Whisper model that you would like to use. Recommended `base` or `small` for real-time experience
|
- The `-mw` argument specifies the Whisper model that you would like to use. Recommended `base` or `small` for real-time experience
|
||||||
@ -37,16 +38,16 @@ make talk-llama
|
|||||||
|
|
||||||
## Session
|
## Session
|
||||||
|
|
||||||
The `talk-llama` tool supports session management to enable more coherent and continuous conversations. By maintaining context from previous interactions, it can better understand and respond to user requests in a more natural way.
|
The `whisper-talk-llama` tool supports session management to enable more coherent and continuous conversations. By maintaining context from previous interactions, it can better understand and respond to user requests in a more natural way.
|
||||||
|
|
||||||
To enable session support, use the `--session FILE` command line option when running the program. The `talk-llama` model state will be saved to the specified file after each interaction. If the file does not exist, it will be created. If the file exists, the model state will be loaded from it, allowing you to resume a previous session.
|
To enable session support, use the `--session FILE` command line option when running the program. The `whisper-talk-llama` model state will be saved to the specified file after each interaction. If the file does not exist, it will be created. If the file exists, the model state will be loaded from it, allowing you to resume a previous session.
|
||||||
|
|
||||||
This feature is especially helpful for maintaining context in long conversations or when interacting with the AI assistant across multiple sessions. It ensures that the assistant remembers the previous interactions and can provide more relevant and contextual responses.
|
This feature is especially helpful for maintaining context in long conversations or when interacting with the AI assistant across multiple sessions. It ensures that the assistant remembers the previous interactions and can provide more relevant and contextual responses.
|
||||||
|
|
||||||
Example usage:
|
Example usage:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./talk-llama --session ./my-session-file -mw ./models/ggml-small.en.bin -ml ../llama.cpp/models/llama-13b/ggml-model-q4_0.gguf -p "Georgi" -t 8
|
./build/bin/whisper-talk-llama --session ./my-session-file -mw ./models/ggml-small.en.bin -ml ../llama.cpp/models/llama-13b/ggml-model-q4_0.gguf -p "Georgi" -t 8
|
||||||
```
|
```
|
||||||
|
|
||||||
## TTS
|
## TTS
|
||||||
|
347
examples/talk-llama/llama-adapter.cpp
Normal file
347
examples/talk-llama/llama-adapter.cpp
Normal file
@ -0,0 +1,347 @@
|
|||||||
|
#include "llama-adapter.h"
|
||||||
|
|
||||||
|
#include "llama-impl.h"
|
||||||
|
#include "llama-mmap.h"
|
||||||
|
#include "llama-model.h"
|
||||||
|
|
||||||
|
#include <algorithm>
|
||||||
|
#include <map>
|
||||||
|
#include <cassert>
|
||||||
|
#include <stdexcept>
|
||||||
|
|
||||||
|
// vec
|
||||||
|
|
||||||
|
struct ggml_tensor * llama_adapter_cvec::tensor_for(int il) const {
|
||||||
|
if (il < 0 || il < layer_start || il > layer_end || (size_t) il >= tensors.size()) {
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
return tensors[il];
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ggml_tensor * llama_adapter_cvec::apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const {
|
||||||
|
ggml_tensor * layer_dir = tensor_for(il);
|
||||||
|
if (layer_dir != nullptr) {
|
||||||
|
cur = ggml_add(ctx, cur, layer_dir);
|
||||||
|
}
|
||||||
|
|
||||||
|
return cur;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool llama_adapter_cvec::init(const llama_model & model) {
|
||||||
|
const auto & hparams = model.hparams;
|
||||||
|
|
||||||
|
GGML_ASSERT(tensors.empty());
|
||||||
|
GGML_ASSERT(ctxs.empty());
|
||||||
|
GGML_ASSERT(bufs.empty());
|
||||||
|
|
||||||
|
// create a context for each buffer type
|
||||||
|
std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
|
||||||
|
auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
|
||||||
|
auto it = ctx_map.find(buft);
|
||||||
|
if (it == ctx_map.end()) {
|
||||||
|
struct ggml_init_params params = {
|
||||||
|
/*.mem_size =*/ hparams.n_layer*ggml_tensor_overhead(),
|
||||||
|
/*.mem_buffer =*/ NULL,
|
||||||
|
/*.no_alloc =*/ true,
|
||||||
|
};
|
||||||
|
|
||||||
|
ggml_context * ctx = ggml_init(params);
|
||||||
|
if (!ctx) {
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx_map[buft] = ctx;
|
||||||
|
ctxs.emplace_back(ctx);
|
||||||
|
|
||||||
|
return ctx;
|
||||||
|
}
|
||||||
|
|
||||||
|
return it->second;
|
||||||
|
};
|
||||||
|
|
||||||
|
// make tensors
|
||||||
|
tensors.reserve(hparams.n_layer);
|
||||||
|
tensors.push_back(nullptr); // there's never a tensor for layer 0
|
||||||
|
for (size_t il = 1; il < hparams.n_layer; il++) {
|
||||||
|
ggml_backend_buffer_type_t buft = model.select_buft(il);
|
||||||
|
ggml_context * ctx = ctx_for_buft(buft);
|
||||||
|
if (!ctx) {
|
||||||
|
LLAMA_LOG_ERROR("%s: failed to allocate context for control vector\n", __func__);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
ggml_tensor * tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
|
||||||
|
tensors.push_back(tensor);
|
||||||
|
}
|
||||||
|
|
||||||
|
// allocate tensors / buffers and zero
|
||||||
|
bufs.reserve(ctx_map.size());
|
||||||
|
for (auto it : ctx_map) {
|
||||||
|
ggml_backend_buffer_type_t buft = it.first;
|
||||||
|
ggml_context * ctx = it.second;
|
||||||
|
ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
|
||||||
|
if (!buf) {
|
||||||
|
LLAMA_LOG_ERROR("%s: failed to allocate buffer for control vector\n", __func__);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
ggml_backend_buffer_clear(buf, 0);
|
||||||
|
bufs.emplace_back(buf);
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t llama_adapter_cvec::apply(
|
||||||
|
const llama_model & model,
|
||||||
|
const float * data,
|
||||||
|
size_t len,
|
||||||
|
int32_t n_embd,
|
||||||
|
int32_t il_start,
|
||||||
|
int32_t il_end) {
|
||||||
|
const auto & hparams = model.hparams;
|
||||||
|
|
||||||
|
if (data == nullptr) {
|
||||||
|
// disable the current control vector (but leave allocated for later)
|
||||||
|
layer_start = -1;
|
||||||
|
layer_end = -1;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (n_embd != (int) hparams.n_embd) {
|
||||||
|
LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (tensors.empty()) {
|
||||||
|
if (!init(model)) {
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
layer_start = il_start;
|
||||||
|
layer_end = il_end;
|
||||||
|
|
||||||
|
for (size_t il = 1; il < hparams.n_layer; il++) {
|
||||||
|
assert(tensors[il] != nullptr);
|
||||||
|
|
||||||
|
const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present
|
||||||
|
if (off + n_embd <= len) {
|
||||||
|
ggml_backend_tensor_set(tensors[il], data + off, 0, n_embd * ggml_element_size(tensors[il]));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// lora
|
||||||
|
|
||||||
|
llama_adapter_lora_weight * llama_adapter_lora::get_weight(struct ggml_tensor * w) {
|
||||||
|
const std::string name(w->name);
|
||||||
|
|
||||||
|
const auto pos = ab_map.find(name);
|
||||||
|
if (pos != ab_map.end()) {
|
||||||
|
return &pos->second;
|
||||||
|
}
|
||||||
|
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void llama_adapter_lora_init_impl(struct llama_model & model, const char * path_lora, struct llama_adapter_lora & adapter) {
|
||||||
|
LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora);
|
||||||
|
|
||||||
|
ggml_context * ctx_init;
|
||||||
|
struct gguf_init_params meta_gguf_params = {
|
||||||
|
/* .no_alloc = */ true,
|
||||||
|
/* .ctx = */ &ctx_init,
|
||||||
|
};
|
||||||
|
|
||||||
|
gguf_context_ptr ctx_gguf { gguf_init_from_file(path_lora, meta_gguf_params) };
|
||||||
|
if (!ctx_gguf) {
|
||||||
|
throw std::runtime_error("failed to load lora adapter file from " + std::string(path_lora));
|
||||||
|
}
|
||||||
|
|
||||||
|
ggml_context_ptr ctx { ctx_init };
|
||||||
|
|
||||||
|
// check metadata
|
||||||
|
{
|
||||||
|
auto get_kv_str = [&](const std::string & key) -> std::string {
|
||||||
|
int id = gguf_find_key(ctx_gguf.get(), key.c_str());
|
||||||
|
return id < 0 ? "" : std::string(gguf_get_val_str(ctx_gguf.get(), id));
|
||||||
|
};
|
||||||
|
auto get_kv_f32 = [&](const std::string & key) -> float {
|
||||||
|
int id = gguf_find_key(ctx_gguf.get(), key.c_str());
|
||||||
|
return id < 0 ? 0.0f : gguf_get_val_f32(ctx_gguf.get(), id);
|
||||||
|
};
|
||||||
|
LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
|
||||||
|
|
||||||
|
auto general_type = get_kv_str(llm_kv(LLM_KV_GENERAL_TYPE));
|
||||||
|
if (general_type != "adapter") {
|
||||||
|
throw std::runtime_error("expect general.type to be 'adapter', but got: " + general_type);
|
||||||
|
}
|
||||||
|
|
||||||
|
auto general_arch_str = get_kv_str(llm_kv(LLM_KV_GENERAL_ARCHITECTURE));
|
||||||
|
auto general_arch = llm_arch_from_string(general_arch_str);
|
||||||
|
if (general_arch != model.arch) {
|
||||||
|
throw std::runtime_error("model arch and LoRA arch mismatch");
|
||||||
|
}
|
||||||
|
|
||||||
|
auto adapter_type = get_kv_str(llm_kv(LLM_KV_ADAPTER_TYPE));
|
||||||
|
if (adapter_type != "lora") {
|
||||||
|
throw std::runtime_error("expect adapter.type to be 'lora', but got: " + adapter_type);
|
||||||
|
}
|
||||||
|
|
||||||
|
adapter.alpha = get_kv_f32(llm_kv(LLM_KV_ADAPTER_LORA_ALPHA));
|
||||||
|
}
|
||||||
|
|
||||||
|
int n_tensors = gguf_get_n_tensors(ctx_gguf.get());
|
||||||
|
|
||||||
|
// contexts for each buffer type
|
||||||
|
std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
|
||||||
|
auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
|
||||||
|
auto it = ctx_map.find(buft);
|
||||||
|
if (it == ctx_map.end()) {
|
||||||
|
// add a new context
|
||||||
|
struct ggml_init_params params = {
|
||||||
|
/*.mem_size =*/ n_tensors*ggml_tensor_overhead(),
|
||||||
|
/*.mem_buffer =*/ NULL,
|
||||||
|
/*.no_alloc =*/ true,
|
||||||
|
};
|
||||||
|
ggml_context * buft_ctx = ggml_init(params);
|
||||||
|
if (!buft_ctx) {
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
ctx_map[buft] = buft_ctx;
|
||||||
|
adapter.ctxs.emplace_back(buft_ctx);
|
||||||
|
return buft_ctx;
|
||||||
|
};
|
||||||
|
return it->second;
|
||||||
|
};
|
||||||
|
|
||||||
|
// bundle lora_a and lora_b into pairs
|
||||||
|
std::map<std::string, llama_adapter_lora_weight> ab_map;
|
||||||
|
auto str_endswith = [](const std::string & str, const std::string & suffix) {
|
||||||
|
return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
for (ggml_tensor * cur = ggml_get_first_tensor(ctx.get()); cur; cur = ggml_get_next_tensor(ctx.get(), cur)) {
|
||||||
|
std::string name(cur->name);
|
||||||
|
if (str_endswith(name, ".lora_a")) {
|
||||||
|
replace_all(name, ".lora_a", "");
|
||||||
|
if (ab_map.find(name) == ab_map.end()) {
|
||||||
|
ab_map[name] = llama_adapter_lora_weight(cur, nullptr);
|
||||||
|
} else {
|
||||||
|
ab_map[name].a = cur;
|
||||||
|
}
|
||||||
|
} else if (str_endswith(name, ".lora_b")) {
|
||||||
|
replace_all(name, ".lora_b", "");
|
||||||
|
if (ab_map.find(name) == ab_map.end()) {
|
||||||
|
ab_map[name] = llama_adapter_lora_weight(nullptr, cur);
|
||||||
|
} else {
|
||||||
|
ab_map[name].b = cur;
|
||||||
|
}
|
||||||
|
} else if (str_endswith(name, "_norm.weight")) {
|
||||||
|
// TODO: add support for norm vector
|
||||||
|
// for now, we don't really care because most adapters still work fine without it
|
||||||
|
continue;
|
||||||
|
} else {
|
||||||
|
throw std::runtime_error("LoRA tensor '" + name + "' has unexpected suffix");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// add tensors
|
||||||
|
for (auto & it : ab_map) {
|
||||||
|
const std::string & name = it.first;
|
||||||
|
llama_adapter_lora_weight & w = it.second;
|
||||||
|
bool is_token_embd = str_endswith(name, "token_embd.weight");
|
||||||
|
|
||||||
|
if (!w.a || !w.b) {
|
||||||
|
throw std::runtime_error("LoRA tensor pair for '" + name + "' is missing one component");
|
||||||
|
}
|
||||||
|
|
||||||
|
// device buft and device ctx
|
||||||
|
const auto * model_tensor = model.get_tensor(name.c_str());
|
||||||
|
if (!model_tensor) {
|
||||||
|
throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model (hint: maybe wrong base model?)");
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ggml_context * dev_ctx = ctx_for_buft(ggml_backend_buffer_get_type(model_tensor->buffer));
|
||||||
|
// validate tensor shape
|
||||||
|
if (is_token_embd) {
|
||||||
|
// expect B to be non-transposed, A and B are flipped; see llm_build_inp_embd()
|
||||||
|
if (model_tensor->ne[0] != w.b->ne[1] || model_tensor->ne[1] != w.a->ne[1]) {
|
||||||
|
throw std::runtime_error("tensor '" + name + "' has incorrect shape (hint: maybe wrong base model?)");
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if (model_tensor->ne[0] != w.a->ne[0] || model_tensor->ne[1] != w.b->ne[1]) {
|
||||||
|
throw std::runtime_error("tensor '" + name + "' has incorrect shape (hint: maybe wrong base model?)");
|
||||||
|
}
|
||||||
|
if (w.a->ne[1] != w.b->ne[0]) {
|
||||||
|
throw std::runtime_error("lora_a tensor is not transposed (hint: adapter from \"finetune\" example is no longer supported)");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// save tensor to adapter
|
||||||
|
struct ggml_tensor * tensor_a = ggml_dup_tensor(dev_ctx, w.a);
|
||||||
|
struct ggml_tensor * tensor_b = ggml_dup_tensor(dev_ctx, w.b);
|
||||||
|
ggml_set_name(tensor_a, w.a->name);
|
||||||
|
ggml_set_name(tensor_b, w.b->name);
|
||||||
|
adapter.ab_map[name] = llama_adapter_lora_weight(tensor_a, tensor_b);
|
||||||
|
}
|
||||||
|
|
||||||
|
// allocate tensors / buffers and zero
|
||||||
|
{
|
||||||
|
adapter.ctxs.reserve(ctx_map.size());
|
||||||
|
adapter.bufs.reserve(ctx_map.size());
|
||||||
|
for (auto & it : ctx_map) {
|
||||||
|
ggml_backend_buffer_type_t buft = it.first;
|
||||||
|
ggml_context * ctx_dev = it.second;
|
||||||
|
ggml_backend_buffer_ptr buf { ggml_backend_alloc_ctx_tensors_from_buft(ctx_dev, buft) };
|
||||||
|
if (!buf) {
|
||||||
|
throw std::runtime_error("failed to allocate buffer for lora adapter\n");
|
||||||
|
}
|
||||||
|
LLAMA_LOG_INFO("%s: %10s LoRA buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get())/1024.0/1024.0);
|
||||||
|
adapter.bufs.emplace_back(std::move(buf));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// set tensor data
|
||||||
|
{
|
||||||
|
llama_file gguf_file(path_lora, "rb");
|
||||||
|
std::vector<uint8_t> read_buf;
|
||||||
|
auto set_tensor = [&](struct ggml_tensor * orig, struct ggml_tensor * dev) {
|
||||||
|
size_t offs = gguf_get_data_offset(ctx_gguf.get()) + gguf_get_tensor_offset(ctx_gguf.get(), gguf_find_tensor(ctx_gguf.get(), orig->name));
|
||||||
|
size_t size = ggml_nbytes(orig);
|
||||||
|
read_buf.resize(size);
|
||||||
|
gguf_file.seek(offs, SEEK_SET);
|
||||||
|
gguf_file.read_raw(read_buf.data(), size);
|
||||||
|
ggml_backend_tensor_set(dev, read_buf.data(), 0, size);
|
||||||
|
};
|
||||||
|
for (auto & it : adapter.ab_map) {
|
||||||
|
auto orig = ab_map[it.first];
|
||||||
|
auto dev = it.second;
|
||||||
|
set_tensor(orig.a, dev.a);
|
||||||
|
set_tensor(orig.b, dev.b);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
LLAMA_LOG_INFO("%s: loaded %zu tensors from lora file\n", __func__, adapter.ab_map.size()*2);
|
||||||
|
}
|
||||||
|
|
||||||
|
struct llama_adapter_lora * llama_adapter_lora_init(struct llama_model * model, const char * path_lora) {
|
||||||
|
struct llama_adapter_lora * adapter = new llama_adapter_lora();
|
||||||
|
|
||||||
|
try {
|
||||||
|
llama_adapter_lora_init_impl(*model, path_lora, *adapter);
|
||||||
|
return adapter;
|
||||||
|
} catch (const std::exception & err) {
|
||||||
|
LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
|
||||||
|
|
||||||
|
delete adapter;
|
||||||
|
}
|
||||||
|
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
void llama_adapter_lora_free(struct llama_adapter_lora * adapter) {
|
||||||
|
delete adapter;
|
||||||
|
}
|
74
examples/talk-llama/llama-adapter.h
Normal file
74
examples/talk-llama/llama-adapter.h
Normal file
@ -0,0 +1,74 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "llama.h"
|
||||||
|
|
||||||
|
#include "ggml-cpp.h"
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include <unordered_map>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
// TODO: pimpl
|
||||||
|
|
||||||
|
//
|
||||||
|
// llama_adapter_cvec
|
||||||
|
//
|
||||||
|
|
||||||
|
struct llama_adapter_cvec {
|
||||||
|
struct ggml_tensor * tensor_for(int il) const;
|
||||||
|
|
||||||
|
struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const;
|
||||||
|
|
||||||
|
int32_t apply(
|
||||||
|
const llama_model & model,
|
||||||
|
const float * data,
|
||||||
|
size_t len,
|
||||||
|
int32_t n_embd,
|
||||||
|
int32_t il_start,
|
||||||
|
int32_t il_end);
|
||||||
|
|
||||||
|
private:
|
||||||
|
bool init(const llama_model & model);
|
||||||
|
|
||||||
|
int32_t layer_start = -1;
|
||||||
|
int32_t layer_end = -1;
|
||||||
|
|
||||||
|
std::vector<ggml_context_ptr> ctxs;
|
||||||
|
std::vector<ggml_backend_buffer_ptr> bufs;
|
||||||
|
|
||||||
|
std::vector<struct ggml_tensor *> tensors; // per layer
|
||||||
|
};
|
||||||
|
|
||||||
|
//
|
||||||
|
// llama_adapter_lora
|
||||||
|
//
|
||||||
|
|
||||||
|
struct llama_adapter_lora_weight {
|
||||||
|
struct ggml_tensor * a = nullptr;
|
||||||
|
struct ggml_tensor * b = nullptr;
|
||||||
|
|
||||||
|
// get actual scale based on rank and alpha
|
||||||
|
float get_scale(float alpha, float adapter_scale) const {
|
||||||
|
const float rank = (float) b->ne[0];
|
||||||
|
const float scale = alpha ? adapter_scale * alpha / rank : adapter_scale;
|
||||||
|
return scale;
|
||||||
|
}
|
||||||
|
|
||||||
|
llama_adapter_lora_weight() = default;
|
||||||
|
llama_adapter_lora_weight(struct ggml_tensor * a, struct ggml_tensor * b) : a(a), b(b) {}
|
||||||
|
};
|
||||||
|
|
||||||
|
struct llama_adapter_lora {
|
||||||
|
// map tensor name to lora_a_b
|
||||||
|
std::unordered_map<std::string, struct llama_adapter_lora_weight> ab_map;
|
||||||
|
|
||||||
|
std::vector<ggml_context_ptr> ctxs;
|
||||||
|
std::vector<ggml_backend_buffer_ptr> bufs;
|
||||||
|
|
||||||
|
float alpha;
|
||||||
|
|
||||||
|
llama_adapter_lora() = default;
|
||||||
|
~llama_adapter_lora() = default;
|
||||||
|
|
||||||
|
llama_adapter_lora_weight * get_weight(struct ggml_tensor * w);
|
||||||
|
};
|
1492
examples/talk-llama/llama-arch.cpp
Normal file
1492
examples/talk-llama/llama-arch.cpp
Normal file
File diff suppressed because it is too large
Load Diff
402
examples/talk-llama/llama-arch.h
Normal file
402
examples/talk-llama/llama-arch.h
Normal file
@ -0,0 +1,402 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "ggml.h" // ggml_op
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
//
|
||||||
|
// gguf constants (sync with gguf.py)
|
||||||
|
//
|
||||||
|
|
||||||
|
enum llm_arch {
|
||||||
|
LLM_ARCH_LLAMA,
|
||||||
|
LLM_ARCH_DECI,
|
||||||
|
LLM_ARCH_FALCON,
|
||||||
|
LLM_ARCH_BAICHUAN,
|
||||||
|
LLM_ARCH_GROK,
|
||||||
|
LLM_ARCH_GPT2,
|
||||||
|
LLM_ARCH_GPTJ,
|
||||||
|
LLM_ARCH_GPTNEOX,
|
||||||
|
LLM_ARCH_MPT,
|
||||||
|
LLM_ARCH_STARCODER,
|
||||||
|
LLM_ARCH_REFACT,
|
||||||
|
LLM_ARCH_BERT,
|
||||||
|
LLM_ARCH_NOMIC_BERT,
|
||||||
|
LLM_ARCH_JINA_BERT_V2,
|
||||||
|
LLM_ARCH_BLOOM,
|
||||||
|
LLM_ARCH_STABLELM,
|
||||||
|
LLM_ARCH_QWEN,
|
||||||
|
LLM_ARCH_QWEN2,
|
||||||
|
LLM_ARCH_QWEN2MOE,
|
||||||
|
LLM_ARCH_QWEN2VL,
|
||||||
|
LLM_ARCH_PHI2,
|
||||||
|
LLM_ARCH_PHI3,
|
||||||
|
LLM_ARCH_PHIMOE,
|
||||||
|
LLM_ARCH_PLAMO,
|
||||||
|
LLM_ARCH_CODESHELL,
|
||||||
|
LLM_ARCH_ORION,
|
||||||
|
LLM_ARCH_INTERNLM2,
|
||||||
|
LLM_ARCH_MINICPM,
|
||||||
|
LLM_ARCH_MINICPM3,
|
||||||
|
LLM_ARCH_GEMMA,
|
||||||
|
LLM_ARCH_GEMMA2,
|
||||||
|
LLM_ARCH_STARCODER2,
|
||||||
|
LLM_ARCH_MAMBA,
|
||||||
|
LLM_ARCH_XVERSE,
|
||||||
|
LLM_ARCH_COMMAND_R,
|
||||||
|
LLM_ARCH_COHERE2,
|
||||||
|
LLM_ARCH_DBRX,
|
||||||
|
LLM_ARCH_OLMO,
|
||||||
|
LLM_ARCH_OLMO2,
|
||||||
|
LLM_ARCH_OLMOE,
|
||||||
|
LLM_ARCH_OPENELM,
|
||||||
|
LLM_ARCH_ARCTIC,
|
||||||
|
LLM_ARCH_DEEPSEEK,
|
||||||
|
LLM_ARCH_DEEPSEEK2,
|
||||||
|
LLM_ARCH_CHATGLM,
|
||||||
|
LLM_ARCH_BITNET,
|
||||||
|
LLM_ARCH_T5,
|
||||||
|
LLM_ARCH_T5ENCODER,
|
||||||
|
LLM_ARCH_JAIS,
|
||||||
|
LLM_ARCH_NEMOTRON,
|
||||||
|
LLM_ARCH_EXAONE,
|
||||||
|
LLM_ARCH_RWKV6,
|
||||||
|
LLM_ARCH_RWKV6QWEN2,
|
||||||
|
LLM_ARCH_GRANITE,
|
||||||
|
LLM_ARCH_GRANITE_MOE,
|
||||||
|
LLM_ARCH_CHAMELEON,
|
||||||
|
LLM_ARCH_WAVTOKENIZER_DEC,
|
||||||
|
LLM_ARCH_UNKNOWN,
|
||||||
|
};
|
||||||
|
|
||||||
|
enum llm_kv {
|
||||||
|
LLM_KV_GENERAL_TYPE,
|
||||||
|
LLM_KV_GENERAL_ARCHITECTURE,
|
||||||
|
LLM_KV_GENERAL_QUANTIZATION_VERSION,
|
||||||
|
LLM_KV_GENERAL_ALIGNMENT,
|
||||||
|
LLM_KV_GENERAL_NAME,
|
||||||
|
LLM_KV_GENERAL_AUTHOR,
|
||||||
|
LLM_KV_GENERAL_VERSION,
|
||||||
|
LLM_KV_GENERAL_URL,
|
||||||
|
LLM_KV_GENERAL_DESCRIPTION,
|
||||||
|
LLM_KV_GENERAL_LICENSE,
|
||||||
|
LLM_KV_GENERAL_SOURCE_URL,
|
||||||
|
LLM_KV_GENERAL_SOURCE_HF_REPO,
|
||||||
|
|
||||||
|
LLM_KV_VOCAB_SIZE,
|
||||||
|
LLM_KV_CONTEXT_LENGTH,
|
||||||
|
LLM_KV_EMBEDDING_LENGTH,
|
||||||
|
LLM_KV_FEATURES_LENGTH,
|
||||||
|
LLM_KV_BLOCK_COUNT,
|
||||||
|
LLM_KV_LEADING_DENSE_BLOCK_COUNT,
|
||||||
|
LLM_KV_FEED_FORWARD_LENGTH,
|
||||||
|
LLM_KV_EXPERT_FEED_FORWARD_LENGTH,
|
||||||
|
LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH,
|
||||||
|
LLM_KV_USE_PARALLEL_RESIDUAL,
|
||||||
|
LLM_KV_TENSOR_DATA_LAYOUT,
|
||||||
|
LLM_KV_EXPERT_COUNT,
|
||||||
|
LLM_KV_EXPERT_USED_COUNT,
|
||||||
|
LLM_KV_EXPERT_SHARED_COUNT,
|
||||||
|
LLM_KV_EXPERT_WEIGHTS_SCALE,
|
||||||
|
LLM_KV_EXPERT_WEIGHTS_NORM,
|
||||||
|
LLM_KV_EXPERT_GATING_FUNC,
|
||||||
|
LLM_KV_POOLING_TYPE,
|
||||||
|
LLM_KV_LOGIT_SCALE,
|
||||||
|
LLM_KV_DECODER_START_TOKEN_ID,
|
||||||
|
LLM_KV_ATTN_LOGIT_SOFTCAPPING,
|
||||||
|
LLM_KV_FINAL_LOGIT_SOFTCAPPING,
|
||||||
|
LLM_KV_SWIN_NORM,
|
||||||
|
LLM_KV_RESCALE_EVERY_N_LAYERS,
|
||||||
|
LLM_KV_TIME_MIX_EXTRA_DIM,
|
||||||
|
LLM_KV_TIME_DECAY_EXTRA_DIM,
|
||||||
|
LLM_KV_RESIDUAL_SCALE,
|
||||||
|
LLM_KV_EMBEDDING_SCALE,
|
||||||
|
LLM_KV_TOKEN_SHIFT_COUNT,
|
||||||
|
|
||||||
|
LLM_KV_ATTENTION_HEAD_COUNT,
|
||||||
|
LLM_KV_ATTENTION_HEAD_COUNT_KV,
|
||||||
|
LLM_KV_ATTENTION_MAX_ALIBI_BIAS,
|
||||||
|
LLM_KV_ATTENTION_CLAMP_KQV,
|
||||||
|
LLM_KV_ATTENTION_KEY_LENGTH,
|
||||||
|
LLM_KV_ATTENTION_VALUE_LENGTH,
|
||||||
|
LLM_KV_ATTENTION_LAYERNORM_EPS,
|
||||||
|
LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,
|
||||||
|
LLM_KV_ATTENTION_GROUPNORM_EPS,
|
||||||
|
LLM_KV_ATTENTION_GROUPNORM_GROUPS,
|
||||||
|
LLM_KV_ATTENTION_CAUSAL,
|
||||||
|
LLM_KV_ATTENTION_Q_LORA_RANK,
|
||||||
|
LLM_KV_ATTENTION_KV_LORA_RANK,
|
||||||
|
LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT,
|
||||||
|
LLM_KV_ATTENTION_SLIDING_WINDOW,
|
||||||
|
LLM_KV_ATTENTION_SCALE,
|
||||||
|
|
||||||
|
LLM_KV_ROPE_DIMENSION_COUNT,
|
||||||
|
LLM_KV_ROPE_DIMENSION_SECTIONS,
|
||||||
|
LLM_KV_ROPE_FREQ_BASE,
|
||||||
|
LLM_KV_ROPE_SCALE_LINEAR,
|
||||||
|
LLM_KV_ROPE_SCALING_TYPE,
|
||||||
|
LLM_KV_ROPE_SCALING_FACTOR,
|
||||||
|
LLM_KV_ROPE_SCALING_ATTN_FACTOR,
|
||||||
|
LLM_KV_ROPE_SCALING_ORIG_CTX_LEN,
|
||||||
|
LLM_KV_ROPE_SCALING_FINETUNED,
|
||||||
|
LLM_KV_ROPE_SCALING_YARN_LOG_MUL,
|
||||||
|
|
||||||
|
LLM_KV_SPLIT_NO,
|
||||||
|
LLM_KV_SPLIT_COUNT,
|
||||||
|
LLM_KV_SPLIT_TENSORS_COUNT,
|
||||||
|
|
||||||
|
LLM_KV_SSM_INNER_SIZE,
|
||||||
|
LLM_KV_SSM_CONV_KERNEL,
|
||||||
|
LLM_KV_SSM_STATE_SIZE,
|
||||||
|
LLM_KV_SSM_TIME_STEP_RANK,
|
||||||
|
LLM_KV_SSM_DT_B_C_RMS,
|
||||||
|
|
||||||
|
LLM_KV_WKV_HEAD_SIZE,
|
||||||
|
|
||||||
|
LLM_KV_TOKENIZER_MODEL,
|
||||||
|
LLM_KV_TOKENIZER_PRE,
|
||||||
|
LLM_KV_TOKENIZER_LIST,
|
||||||
|
LLM_KV_TOKENIZER_TOKEN_TYPE,
|
||||||
|
LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT,
|
||||||
|
LLM_KV_TOKENIZER_SCORES,
|
||||||
|
LLM_KV_TOKENIZER_MERGES,
|
||||||
|
LLM_KV_TOKENIZER_BOS_ID,
|
||||||
|
LLM_KV_TOKENIZER_EOS_ID,
|
||||||
|
LLM_KV_TOKENIZER_EOT_ID,
|
||||||
|
LLM_KV_TOKENIZER_EOM_ID,
|
||||||
|
LLM_KV_TOKENIZER_UNK_ID,
|
||||||
|
LLM_KV_TOKENIZER_SEP_ID,
|
||||||
|
LLM_KV_TOKENIZER_PAD_ID,
|
||||||
|
LLM_KV_TOKENIZER_CLS_ID,
|
||||||
|
LLM_KV_TOKENIZER_MASK_ID,
|
||||||
|
LLM_KV_TOKENIZER_ADD_BOS,
|
||||||
|
LLM_KV_TOKENIZER_ADD_EOS,
|
||||||
|
LLM_KV_TOKENIZER_ADD_PREFIX,
|
||||||
|
LLM_KV_TOKENIZER_REMOVE_EXTRA_WS,
|
||||||
|
LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP,
|
||||||
|
LLM_KV_TOKENIZER_HF_JSON,
|
||||||
|
LLM_KV_TOKENIZER_RWKV,
|
||||||
|
LLM_KV_TOKENIZER_CHAT_TEMPLATE,
|
||||||
|
LLM_KV_TOKENIZER_CHAT_TEMPLATE_N,
|
||||||
|
LLM_KV_TOKENIZER_FIM_PRE_ID,
|
||||||
|
LLM_KV_TOKENIZER_FIM_SUF_ID,
|
||||||
|
LLM_KV_TOKENIZER_FIM_MID_ID,
|
||||||
|
LLM_KV_TOKENIZER_FIM_PAD_ID,
|
||||||
|
LLM_KV_TOKENIZER_FIM_REP_ID,
|
||||||
|
LLM_KV_TOKENIZER_FIM_SEP_ID,
|
||||||
|
|
||||||
|
LLM_KV_ADAPTER_TYPE,
|
||||||
|
LLM_KV_ADAPTER_LORA_ALPHA,
|
||||||
|
|
||||||
|
LLM_KV_POSNET_EMBEDDING_LENGTH,
|
||||||
|
LLM_KV_POSNET_BLOCK_COUNT,
|
||||||
|
|
||||||
|
LLM_KV_CONVNEXT_EMBEDDING_LENGTH,
|
||||||
|
LLM_KV_CONVNEXT_BLOCK_COUNT,
|
||||||
|
|
||||||
|
// deprecated:
|
||||||
|
LLM_KV_TOKENIZER_PREFIX_ID,
|
||||||
|
LLM_KV_TOKENIZER_SUFFIX_ID,
|
||||||
|
LLM_KV_TOKENIZER_MIDDLE_ID,
|
||||||
|
};
|
||||||
|
|
||||||
|
enum llm_tensor {
|
||||||
|
LLM_TENSOR_TOKEN_EMBD,
|
||||||
|
LLM_TENSOR_TOKEN_EMBD_NORM,
|
||||||
|
LLM_TENSOR_TOKEN_TYPES,
|
||||||
|
LLM_TENSOR_POS_EMBD,
|
||||||
|
LLM_TENSOR_OUTPUT,
|
||||||
|
LLM_TENSOR_OUTPUT_NORM,
|
||||||
|
LLM_TENSOR_ROPE_FREQS,
|
||||||
|
LLM_TENSOR_ROPE_FACTORS_LONG,
|
||||||
|
LLM_TENSOR_ROPE_FACTORS_SHORT,
|
||||||
|
LLM_TENSOR_ATTN_Q,
|
||||||
|
LLM_TENSOR_ATTN_K,
|
||||||
|
LLM_TENSOR_ATTN_V,
|
||||||
|
LLM_TENSOR_ATTN_QKV,
|
||||||
|
LLM_TENSOR_ATTN_OUT,
|
||||||
|
LLM_TENSOR_ATTN_NORM,
|
||||||
|
LLM_TENSOR_ATTN_NORM_2,
|
||||||
|
LLM_TENSOR_ATTN_OUT_NORM,
|
||||||
|
LLM_TENSOR_ATTN_POST_NORM,
|
||||||
|
LLM_TENSOR_ATTN_ROT_EMBD,
|
||||||
|
LLM_TENSOR_FFN_GATE_INP,
|
||||||
|
LLM_TENSOR_FFN_GATE_INP_SHEXP,
|
||||||
|
LLM_TENSOR_FFN_NORM,
|
||||||
|
LLM_TENSOR_FFN_POST_NORM,
|
||||||
|
LLM_TENSOR_FFN_GATE,
|
||||||
|
LLM_TENSOR_FFN_DOWN,
|
||||||
|
LLM_TENSOR_FFN_UP,
|
||||||
|
LLM_TENSOR_FFN_ACT,
|
||||||
|
LLM_TENSOR_FFN_DOWN_EXP, // split experts for backward compatibility
|
||||||
|
LLM_TENSOR_FFN_GATE_EXP,
|
||||||
|
LLM_TENSOR_FFN_UP_EXP,
|
||||||
|
LLM_TENSOR_FFN_NORM_EXPS,
|
||||||
|
LLM_TENSOR_FFN_DOWN_EXPS, // merged experts
|
||||||
|
LLM_TENSOR_FFN_GATE_EXPS,
|
||||||
|
LLM_TENSOR_FFN_UP_EXPS,
|
||||||
|
LLM_TENSOR_FFN_DOWN_SHEXP,
|
||||||
|
LLM_TENSOR_FFN_GATE_SHEXP,
|
||||||
|
LLM_TENSOR_FFN_UP_SHEXP,
|
||||||
|
LLM_TENSOR_FFN_EXP_PROBS_B,
|
||||||
|
LLM_TENSOR_ATTN_Q_NORM,
|
||||||
|
LLM_TENSOR_ATTN_K_NORM,
|
||||||
|
LLM_TENSOR_LAYER_OUT_NORM,
|
||||||
|
LLM_TENSOR_SSM_IN,
|
||||||
|
LLM_TENSOR_SSM_CONV1D,
|
||||||
|
LLM_TENSOR_SSM_X,
|
||||||
|
LLM_TENSOR_SSM_DT,
|
||||||
|
LLM_TENSOR_SSM_A,
|
||||||
|
LLM_TENSOR_SSM_D,
|
||||||
|
LLM_TENSOR_SSM_OUT,
|
||||||
|
LLM_TENSOR_TIME_MIX_W1,
|
||||||
|
LLM_TENSOR_TIME_MIX_W2,
|
||||||
|
LLM_TENSOR_TIME_MIX_LERP_X,
|
||||||
|
LLM_TENSOR_TIME_MIX_LERP_W,
|
||||||
|
LLM_TENSOR_TIME_MIX_LERP_K,
|
||||||
|
LLM_TENSOR_TIME_MIX_LERP_V,
|
||||||
|
LLM_TENSOR_TIME_MIX_LERP_R,
|
||||||
|
LLM_TENSOR_TIME_MIX_LERP_G,
|
||||||
|
LLM_TENSOR_TIME_MIX_LERP_FUSED,
|
||||||
|
LLM_TENSOR_TIME_MIX_FIRST,
|
||||||
|
LLM_TENSOR_TIME_MIX_DECAY,
|
||||||
|
LLM_TENSOR_TIME_MIX_DECAY_W1,
|
||||||
|
LLM_TENSOR_TIME_MIX_DECAY_W2,
|
||||||
|
LLM_TENSOR_TIME_MIX_KEY,
|
||||||
|
LLM_TENSOR_TIME_MIX_VALUE,
|
||||||
|
LLM_TENSOR_TIME_MIX_RECEPTANCE,
|
||||||
|
LLM_TENSOR_TIME_MIX_GATE,
|
||||||
|
LLM_TENSOR_TIME_MIX_LN,
|
||||||
|
LLM_TENSOR_TIME_MIX_OUTPUT,
|
||||||
|
LLM_TENSOR_CHANNEL_MIX_LERP_K,
|
||||||
|
LLM_TENSOR_CHANNEL_MIX_LERP_R,
|
||||||
|
LLM_TENSOR_CHANNEL_MIX_KEY,
|
||||||
|
LLM_TENSOR_CHANNEL_MIX_RECEPTANCE,
|
||||||
|
LLM_TENSOR_CHANNEL_MIX_VALUE,
|
||||||
|
LLM_TENSOR_ATTN_Q_A,
|
||||||
|
LLM_TENSOR_ATTN_Q_B,
|
||||||
|
LLM_TENSOR_ATTN_KV_A_MQA,
|
||||||
|
LLM_TENSOR_ATTN_KV_B,
|
||||||
|
LLM_TENSOR_ATTN_Q_A_NORM,
|
||||||
|
LLM_TENSOR_ATTN_KV_A_NORM,
|
||||||
|
LLM_TENSOR_ATTN_SUB_NORM,
|
||||||
|
LLM_TENSOR_FFN_SUB_NORM,
|
||||||
|
LLM_TENSOR_DEC_ATTN_NORM,
|
||||||
|
LLM_TENSOR_DEC_ATTN_Q,
|
||||||
|
LLM_TENSOR_DEC_ATTN_K,
|
||||||
|
LLM_TENSOR_DEC_ATTN_V,
|
||||||
|
LLM_TENSOR_DEC_ATTN_OUT,
|
||||||
|
LLM_TENSOR_DEC_ATTN_REL_B,
|
||||||
|
LLM_TENSOR_DEC_CROSS_ATTN_NORM,
|
||||||
|
LLM_TENSOR_DEC_CROSS_ATTN_Q,
|
||||||
|
LLM_TENSOR_DEC_CROSS_ATTN_K,
|
||||||
|
LLM_TENSOR_DEC_CROSS_ATTN_V,
|
||||||
|
LLM_TENSOR_DEC_CROSS_ATTN_OUT,
|
||||||
|
LLM_TENSOR_DEC_CROSS_ATTN_REL_B,
|
||||||
|
LLM_TENSOR_DEC_FFN_NORM,
|
||||||
|
LLM_TENSOR_DEC_FFN_GATE,
|
||||||
|
LLM_TENSOR_DEC_FFN_DOWN,
|
||||||
|
LLM_TENSOR_DEC_FFN_UP,
|
||||||
|
LLM_TENSOR_DEC_OUTPUT_NORM,
|
||||||
|
LLM_TENSOR_ENC_ATTN_NORM,
|
||||||
|
LLM_TENSOR_ENC_ATTN_Q,
|
||||||
|
LLM_TENSOR_ENC_ATTN_K,
|
||||||
|
LLM_TENSOR_ENC_ATTN_V,
|
||||||
|
LLM_TENSOR_ENC_ATTN_OUT,
|
||||||
|
LLM_TENSOR_ENC_ATTN_REL_B,
|
||||||
|
LLM_TENSOR_ENC_FFN_NORM,
|
||||||
|
LLM_TENSOR_ENC_FFN_GATE,
|
||||||
|
LLM_TENSOR_ENC_FFN_DOWN,
|
||||||
|
LLM_TENSOR_ENC_FFN_UP,
|
||||||
|
LLM_TENSOR_ENC_OUTPUT_NORM,
|
||||||
|
LLM_TENSOR_CLS,
|
||||||
|
LLM_TENSOR_CLS_OUT,
|
||||||
|
LLM_TENSOR_CONV1D,
|
||||||
|
LLM_TENSOR_CONVNEXT_DW,
|
||||||
|
LLM_TENSOR_CONVNEXT_NORM,
|
||||||
|
LLM_TENSOR_CONVNEXT_PW1,
|
||||||
|
LLM_TENSOR_CONVNEXT_PW2,
|
||||||
|
LLM_TENSOR_CONVNEXT_GAMMA,
|
||||||
|
LLM_TENSOR_POS_NET_CONV1,
|
||||||
|
LLM_TENSOR_POS_NET_CONV2,
|
||||||
|
LLM_TENSOR_POS_NET_NORM,
|
||||||
|
LLM_TENSOR_POS_NET_NORM1,
|
||||||
|
LLM_TENSOR_POS_NET_NORM2,
|
||||||
|
LLM_TENSOR_POS_NET_ATTN_NORM,
|
||||||
|
LLM_TENSOR_POS_NET_ATTN_Q,
|
||||||
|
LLM_TENSOR_POS_NET_ATTN_K,
|
||||||
|
LLM_TENSOR_POS_NET_ATTN_V,
|
||||||
|
LLM_TENSOR_POS_NET_ATTN_OUT,
|
||||||
|
};
|
||||||
|
|
||||||
|
enum llm_tensor_layer {
|
||||||
|
LLM_TENSOR_LAYER_INPUT,
|
||||||
|
LLM_TENSOR_LAYER_REPEATING,
|
||||||
|
LLM_TENSOR_LAYER_OUTPUT,
|
||||||
|
};
|
||||||
|
|
||||||
|
struct LLM_KV {
|
||||||
|
LLM_KV(llm_arch arch, const char * suffix = nullptr);
|
||||||
|
|
||||||
|
llm_arch arch;
|
||||||
|
const char * suffix;
|
||||||
|
|
||||||
|
std::string operator()(llm_kv kv) const;
|
||||||
|
};
|
||||||
|
|
||||||
|
// helper to handle gguf constants
|
||||||
|
// usage:
|
||||||
|
//
|
||||||
|
// const auto tn = LLM_TN(LLM_ARCH_LLAMA);
|
||||||
|
//
|
||||||
|
// std::string name = tn(LLM_TENSOR_OUTPUT); -> "output"
|
||||||
|
// std::string name = tn(LLM_TENSOR_TOKEN_EMBD, "bias"); -> "token_embd.bias"
|
||||||
|
// std::string name = tn(LLM_TENSOR_ATTN_NORM, "weight", 3); -> "blk.3.attn_norm.weight"
|
||||||
|
//
|
||||||
|
struct LLM_TN_IMPL {
|
||||||
|
const llm_arch arch;
|
||||||
|
const llm_tensor tensor;
|
||||||
|
const char * const suffix;
|
||||||
|
const int bid;
|
||||||
|
const int xid;
|
||||||
|
|
||||||
|
std::string str() const;
|
||||||
|
|
||||||
|
operator std::string() const {
|
||||||
|
return str();
|
||||||
|
}
|
||||||
|
|
||||||
|
friend bool operator==(const std::string & str, const LLM_TN_IMPL & tn) {
|
||||||
|
return str == tn.str();
|
||||||
|
}
|
||||||
|
|
||||||
|
friend bool operator!=(const std::string & str, const LLM_TN_IMPL & tn) {
|
||||||
|
return str != tn.str();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
struct LLM_TN {
|
||||||
|
LLM_TN(llm_arch arch) : arch(arch) {}
|
||||||
|
|
||||||
|
llm_arch arch;
|
||||||
|
|
||||||
|
LLM_TN_IMPL operator()(llm_tensor tensor, const char * suffix, int bid = -1, int xid = -1) const {
|
||||||
|
return { arch, tensor, suffix, bid, xid };
|
||||||
|
}
|
||||||
|
|
||||||
|
LLM_TN_IMPL operator()(llm_tensor tensor, int bid = -1, int xid = -1) const {
|
||||||
|
return { arch, tensor, nullptr, bid, xid };
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
struct llm_tensor_info {
|
||||||
|
llm_tensor_layer layer;
|
||||||
|
ggml_op op;
|
||||||
|
};
|
||||||
|
|
||||||
|
const char * llm_arch_name(llm_arch arch);
|
||||||
|
|
||||||
|
llm_arch llm_arch_from_string(const std::string & name);
|
||||||
|
|
||||||
|
const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor);
|
368
examples/talk-llama/llama-batch.cpp
Normal file
368
examples/talk-llama/llama-batch.cpp
Normal file
@ -0,0 +1,368 @@
|
|||||||
|
#include "llama-batch.h"
|
||||||
|
|
||||||
|
#include <cstring>
|
||||||
|
#include <algorithm>
|
||||||
|
|
||||||
|
llama_ubatch llama_sbatch::reserve_ubatch(size_t n_ubatch, bool has_embd) {
|
||||||
|
// clear empty sequences
|
||||||
|
// the previous ubatch is assumed to be gone,
|
||||||
|
// so nothing should refer to values in these sequences anymore.
|
||||||
|
for (size_t i = seq.size(); i-- > 0;) {
|
||||||
|
if (seq[i].length == 0) {
|
||||||
|
seq.pop_back();
|
||||||
|
} else {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ubatch_token.resize(!has_embd ? n_ubatch : 0);
|
||||||
|
ubatch_embd.resize(has_embd ? n_embd * n_ubatch : 0);
|
||||||
|
ubatch_pos.resize(n_ubatch);
|
||||||
|
ubatch_n_seq_id.resize(n_ubatch);
|
||||||
|
ubatch_seq_id.resize(n_ubatch);
|
||||||
|
ubatch_output.resize(n_ubatch);
|
||||||
|
llama_ubatch ubatch = {
|
||||||
|
/*equal_seqs =*/ true,
|
||||||
|
/*n_tokens =*/ 0,
|
||||||
|
/*n_seq_tokens =*/ 0,
|
||||||
|
/*n_seqs =*/ 0,
|
||||||
|
/*token =*/ !has_embd ? ubatch_token.data() : nullptr,
|
||||||
|
/*embd =*/ has_embd ? ubatch_embd.data() : nullptr,
|
||||||
|
/*pos =*/ ubatch_pos.data(),
|
||||||
|
/*n_seq_id =*/ ubatch_n_seq_id.data(),
|
||||||
|
/*seq_id =*/ ubatch_seq_id.data(),
|
||||||
|
/*output =*/ ubatch_output.data(),
|
||||||
|
};
|
||||||
|
return ubatch;
|
||||||
|
}
|
||||||
|
|
||||||
|
void llama_sbatch::add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & seq, size_t length) {
|
||||||
|
GGML_ASSERT(batch != nullptr);
|
||||||
|
GGML_ASSERT(length <= seq.length);
|
||||||
|
// Can only add sequences of equal lengths to a batch,
|
||||||
|
// otherwise it isn't clear to which sequence a token belongs
|
||||||
|
GGML_ASSERT(seq.n_seq_id == 0 || ubatch.n_seqs == 0 || length == (size_t) ubatch.n_tokens / ubatch.n_seqs);
|
||||||
|
GGML_ASSERT((seq.n_seq_id != 0) == ubatch.equal_seqs);
|
||||||
|
// NOTE: loops are separated for cache-friendliness
|
||||||
|
if (batch->token) {
|
||||||
|
if (ubatch.equal_seqs) {
|
||||||
|
for (size_t i = 0; i < length; ++i) {
|
||||||
|
ubatch.token[ubatch.n_tokens + i] = batch->token[ids[seq.offset + i]];
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// simple split
|
||||||
|
ubatch.token = batch->token + seq.offset;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ubatch.token = nullptr;
|
||||||
|
}
|
||||||
|
if (batch->embd) {
|
||||||
|
if (ubatch.equal_seqs) {
|
||||||
|
for (size_t i = 0; i < length; ++i) {
|
||||||
|
memcpy(
|
||||||
|
ubatch.embd + (n_embd * (ubatch.n_tokens + i)),
|
||||||
|
batch->embd + (n_embd * ids[seq.offset + i]),
|
||||||
|
n_embd * sizeof(float)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// simple split
|
||||||
|
ubatch.embd = batch->embd + (n_embd * seq.offset);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ubatch.embd = nullptr;
|
||||||
|
}
|
||||||
|
if (ubatch.equal_seqs) {
|
||||||
|
for (size_t i = 0; i < length; ++i) {
|
||||||
|
ubatch.pos[ubatch.n_tokens + i] = batch->pos[ids[seq.offset + i]];
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// simple split
|
||||||
|
ubatch.pos = batch->pos + seq.offset;
|
||||||
|
}
|
||||||
|
if (ubatch.equal_seqs) {
|
||||||
|
ubatch.n_seq_id[ubatch.n_seqs] = seq.n_seq_id;
|
||||||
|
if (seq.seq_id) {
|
||||||
|
ubatch.seq_id[ubatch.n_seqs] = seq.seq_id;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// simple split
|
||||||
|
if (batch->n_seq_id) {
|
||||||
|
ubatch.n_seq_id = batch->n_seq_id + seq.offset;
|
||||||
|
} else {
|
||||||
|
for (size_t i = 0; i < length; ++i) {
|
||||||
|
ubatch.n_seq_id[ubatch.n_seqs + i] = 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (batch->seq_id) {
|
||||||
|
ubatch.seq_id = batch->seq_id + seq.offset;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (logits_all) {
|
||||||
|
for (size_t i = 0; i < length; ++i) {
|
||||||
|
ubatch.output[ubatch.n_tokens + i] = 1;
|
||||||
|
out_ids.push_back(ids[seq.offset + i]);
|
||||||
|
}
|
||||||
|
} else if (batch->logits) {
|
||||||
|
if (ubatch.equal_seqs) {
|
||||||
|
for (size_t i = 0; i < length; ++i) {
|
||||||
|
size_t id = ids[seq.offset + i];
|
||||||
|
int8_t is_output = batch->logits[id];
|
||||||
|
ubatch.output[ubatch.n_tokens + i] = is_output;
|
||||||
|
if (is_output) { out_ids.push_back(id); }
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// simple split
|
||||||
|
ubatch.output = batch->logits + seq.offset;
|
||||||
|
for (size_t i = 0; i < length; ++i) {
|
||||||
|
if (ubatch.output[i] != 0) { out_ids.push_back(seq.offset + i); }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// only get last output
|
||||||
|
for (size_t i = 0; i < length; ++i) {
|
||||||
|
size_t id = ids[seq.offset + i];
|
||||||
|
int8_t is_last = id == ids.size() - 1;
|
||||||
|
ubatch.output[ubatch.n_tokens + i] = is_last;
|
||||||
|
if (is_last) { out_ids.push_back(id); }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (ubatch.n_tokens == 0 && ubatch.n_seqs == 0) {
|
||||||
|
ubatch.n_seq_tokens = ubatch.equal_seqs ? length : 1;
|
||||||
|
}
|
||||||
|
ubatch.n_tokens += length;
|
||||||
|
ubatch.n_seqs += ubatch.equal_seqs ? 1 : length; // virtual sequences for simple splits
|
||||||
|
seq.offset += length;
|
||||||
|
seq.length -= length;
|
||||||
|
n_tokens -= length;
|
||||||
|
GGML_ASSERT(ubatch.n_tokens == ubatch.n_seq_tokens * ubatch.n_seqs);
|
||||||
|
}
|
||||||
|
|
||||||
|
llama_ubatch llama_sbatch::split_simple(size_t n_ubatch) {
|
||||||
|
n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
|
||||||
|
llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
|
||||||
|
ubatch.equal_seqs = false;
|
||||||
|
if (!seq.empty()) {
|
||||||
|
llama_sbatch_seq & s = seq[0];
|
||||||
|
size_t length = s.length < n_ubatch ? s.length : n_ubatch;
|
||||||
|
GGML_ASSERT(seq.size() == 1 && s.n_seq_id == 0); // don't mix with other splits
|
||||||
|
add_seq_to_ubatch(ubatch, s, length);
|
||||||
|
}
|
||||||
|
return ubatch;
|
||||||
|
}
|
||||||
|
|
||||||
|
llama_ubatch llama_sbatch::split_equal(size_t n_ubatch) {
|
||||||
|
n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
|
||||||
|
llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
|
||||||
|
if (!seq.empty()) {
|
||||||
|
size_t length = 0;
|
||||||
|
size_t n_tokens_in_ubatch = 0;
|
||||||
|
GGML_ASSERT(seq[0].n_seq_id > 0); // should not be mixed with simple splits
|
||||||
|
// smallest first, because it's easier to split this way;
|
||||||
|
// starting from the end to pop in constant time.
|
||||||
|
for (size_t i = seq.size(); i-- > 0;) {
|
||||||
|
llama_sbatch_seq & s = seq[i];
|
||||||
|
GGML_ASSERT(s.length > 0);
|
||||||
|
if (length == 0) {
|
||||||
|
length = s.length < n_ubatch ? s.length : n_ubatch;
|
||||||
|
}
|
||||||
|
add_seq_to_ubatch(ubatch, s, length);
|
||||||
|
n_tokens_in_ubatch += length;
|
||||||
|
// shared prompts can't be mixed with any of their sequences,
|
||||||
|
// so it's safer to compute them in their own ubatch
|
||||||
|
if (s.n_seq_id > 1) { break; }
|
||||||
|
// stop when there isn't enough space for another sequence
|
||||||
|
if (length + n_tokens_in_ubatch > n_ubatch) { break; }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ubatch;
|
||||||
|
}
|
||||||
|
|
||||||
|
llama_ubatch llama_sbatch::split_seq(size_t n_ubatch) {
|
||||||
|
n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
|
||||||
|
llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
|
||||||
|
if (!seq.empty()) {
|
||||||
|
llama_sbatch_seq & s = seq[seq.size() - 1];
|
||||||
|
size_t length = s.length < n_ubatch ? s.length : n_ubatch;
|
||||||
|
GGML_ASSERT(s.n_seq_id > 0); // should not be mixed with simple splits
|
||||||
|
add_seq_to_ubatch(ubatch, s, length);
|
||||||
|
}
|
||||||
|
return ubatch;
|
||||||
|
}
|
||||||
|
|
||||||
|
void llama_sbatch::from_batch(const llama_batch & batch, size_t n_embd, bool simple_split, bool logits_all) {
|
||||||
|
GGML_ASSERT(batch.n_tokens >= 0);
|
||||||
|
this->batch = &batch;
|
||||||
|
this->n_embd = n_embd;
|
||||||
|
this->logits_all = logits_all;
|
||||||
|
|
||||||
|
n_tokens = batch.n_tokens;
|
||||||
|
ids.resize(n_tokens);
|
||||||
|
out_ids.clear();
|
||||||
|
// TODO: reserve out_ids and seq
|
||||||
|
|
||||||
|
for (size_t i = 0; i < n_tokens; ++i) {
|
||||||
|
ids[i] = i;
|
||||||
|
}
|
||||||
|
if (simple_split) {
|
||||||
|
seq.resize(1);
|
||||||
|
llama_sbatch_seq & s = seq[0];
|
||||||
|
s.n_seq_id = 0;
|
||||||
|
s.seq_id = nullptr;
|
||||||
|
s.offset = 0;
|
||||||
|
s.length = n_tokens;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
std::sort(ids.begin(), ids.end(),
|
||||||
|
[&batch](size_t a, size_t b) {
|
||||||
|
int32_t n_seq_a = batch.n_seq_id ? batch.n_seq_id[a] : 1;
|
||||||
|
int32_t n_seq_b = batch.n_seq_id ? batch.n_seq_id[b] : 1;
|
||||||
|
// sort by seq_id, then by pos
|
||||||
|
if (n_seq_a == n_seq_b) {
|
||||||
|
if (batch.seq_id) {
|
||||||
|
for (int32_t i = 0; i < n_seq_a; ++i) {
|
||||||
|
llama_seq_id seq_id_a = batch.seq_id[a][i];
|
||||||
|
llama_seq_id seq_id_b = batch.seq_id[b][i];
|
||||||
|
// smaller seq_ids go first
|
||||||
|
if (seq_id_a != seq_id_b) {
|
||||||
|
return seq_id_a < seq_id_b;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// when all else is equal, sort by pos
|
||||||
|
if (batch.pos) {
|
||||||
|
return batch.pos[a] < batch.pos[b];
|
||||||
|
}
|
||||||
|
// no pos, sort by id
|
||||||
|
return a < b;
|
||||||
|
}
|
||||||
|
// shared prompts go first
|
||||||
|
return n_seq_a > n_seq_b;
|
||||||
|
}
|
||||||
|
);
|
||||||
|
// init seq
|
||||||
|
llama_sbatch_seq * last_seq = nullptr;
|
||||||
|
|
||||||
|
for (size_t i = 0; i < n_tokens; ++i) {
|
||||||
|
const size_t bi = ids[i];
|
||||||
|
const int32_t n_seqs = batch.n_seq_id[bi];
|
||||||
|
llama_seq_id * seq_ids = batch.seq_id[bi];
|
||||||
|
if (last_seq != nullptr) {
|
||||||
|
bool same = n_seqs == last_seq->n_seq_id;
|
||||||
|
for (int32_t j = 0; same && j < n_seqs; ++j) {
|
||||||
|
if (seq_ids[j] != last_seq->seq_id[j]) {
|
||||||
|
same = false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (same) {
|
||||||
|
last_seq->length += 1;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
llama_sbatch_seq new_seq = {n_seqs, seq_ids, i, 1};
|
||||||
|
seq.push_back(new_seq);
|
||||||
|
last_seq = &seq.back();
|
||||||
|
}
|
||||||
|
// keep shared prompts first at the end, then sort by length descending.
|
||||||
|
std::sort(seq.begin(), seq.end(),
|
||||||
|
[](llama_sbatch_seq & a, llama_sbatch_seq & b) {
|
||||||
|
if (a.n_seq_id == b.n_seq_id) {
|
||||||
|
return a.length > b.length;
|
||||||
|
}
|
||||||
|
return a.n_seq_id < b.n_seq_id;
|
||||||
|
}
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
llama_batch_allocr::llama_batch_allocr(struct llama_batch in_batch, llama_pos p0) {
|
||||||
|
batch = in_batch;
|
||||||
|
GGML_ASSERT(batch.n_tokens > 0);
|
||||||
|
if (!batch.pos) {
|
||||||
|
pos.resize(batch.n_tokens);
|
||||||
|
for (int32_t i = 0; i < batch.n_tokens; i++) {
|
||||||
|
pos[i] = i + p0;
|
||||||
|
}
|
||||||
|
batch.pos = pos.data();
|
||||||
|
}
|
||||||
|
if (!batch.n_seq_id) {
|
||||||
|
n_seq_id.resize(batch.n_tokens);
|
||||||
|
for (int32_t i = 0; i < batch.n_tokens; i++) {
|
||||||
|
n_seq_id[i] = seq_id_0.size();
|
||||||
|
}
|
||||||
|
batch.n_seq_id = n_seq_id.data();
|
||||||
|
}
|
||||||
|
if (!batch.seq_id) {
|
||||||
|
seq_id.resize(batch.n_tokens + 1);
|
||||||
|
seq_id[batch.n_tokens] = NULL;
|
||||||
|
for (int32_t i = 0; i < batch.n_tokens; i++) {
|
||||||
|
seq_id[i] = seq_id_0.data();
|
||||||
|
}
|
||||||
|
batch.seq_id = seq_id.data();
|
||||||
|
}
|
||||||
|
if (!batch.logits) {
|
||||||
|
logits.resize(batch.n_tokens);
|
||||||
|
logits[logits.size() - 1] = true;
|
||||||
|
batch.logits = logits.data();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//
|
||||||
|
// interface implementation
|
||||||
|
//
|
||||||
|
|
||||||
|
struct llama_batch llama_batch_get_one(
|
||||||
|
llama_token * tokens,
|
||||||
|
int32_t n_tokens) {
|
||||||
|
return {
|
||||||
|
/*n_tokens =*/ n_tokens,
|
||||||
|
/*tokens =*/ tokens,
|
||||||
|
/*embd =*/ nullptr,
|
||||||
|
/*pos =*/ nullptr,
|
||||||
|
/*n_seq_id =*/ nullptr,
|
||||||
|
/*seq_id =*/ nullptr,
|
||||||
|
/*logits =*/ nullptr,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
struct llama_batch llama_batch_init(int32_t n_tokens_alloc, int32_t embd, int32_t n_seq_max) {
|
||||||
|
llama_batch batch = {
|
||||||
|
/*n_tokens =*/ 0,
|
||||||
|
/*tokens =*/ nullptr,
|
||||||
|
/*embd =*/ nullptr,
|
||||||
|
/*pos =*/ nullptr,
|
||||||
|
/*n_seq_id =*/ nullptr,
|
||||||
|
/*seq_id =*/ nullptr,
|
||||||
|
/*logits =*/ nullptr,
|
||||||
|
};
|
||||||
|
|
||||||
|
if (embd) {
|
||||||
|
batch.embd = (float *) malloc(sizeof(float) * n_tokens_alloc * embd);
|
||||||
|
} else {
|
||||||
|
batch.token = (llama_token *) malloc(sizeof(llama_token) * n_tokens_alloc);
|
||||||
|
}
|
||||||
|
|
||||||
|
batch.pos = (llama_pos *) malloc(sizeof(llama_pos) * n_tokens_alloc);
|
||||||
|
batch.n_seq_id = (int32_t *) malloc(sizeof(int32_t) * n_tokens_alloc);
|
||||||
|
batch.seq_id = (llama_seq_id **) malloc(sizeof(llama_seq_id *) * (n_tokens_alloc + 1));
|
||||||
|
for (int i = 0; i < n_tokens_alloc; ++i) {
|
||||||
|
batch.seq_id[i] = (llama_seq_id *) malloc(sizeof(llama_seq_id) * n_seq_max);
|
||||||
|
}
|
||||||
|
batch.seq_id[n_tokens_alloc] = nullptr;
|
||||||
|
|
||||||
|
batch.logits = (int8_t *) malloc(sizeof(int8_t) * n_tokens_alloc);
|
||||||
|
|
||||||
|
return batch;
|
||||||
|
}
|
||||||
|
|
||||||
|
void llama_batch_free(struct llama_batch batch) {
|
||||||
|
if (batch.token) free(batch.token);
|
||||||
|
if (batch.embd) free(batch.embd);
|
||||||
|
if (batch.pos) free(batch.pos);
|
||||||
|
if (batch.n_seq_id) free(batch.n_seq_id);
|
||||||
|
if (batch.seq_id) {
|
||||||
|
for (int i = 0; batch.seq_id[i] != nullptr; ++i) {
|
||||||
|
free(batch.seq_id[i]);
|
||||||
|
}
|
||||||
|
free(batch.seq_id);
|
||||||
|
}
|
||||||
|
if (batch.logits) free(batch.logits);
|
||||||
|
}
|
88
examples/talk-llama/llama-batch.h
Normal file
88
examples/talk-llama/llama-batch.h
Normal file
@ -0,0 +1,88 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "llama.h"
|
||||||
|
|
||||||
|
#include <array>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
// very similar to llama_batch,
|
||||||
|
// but has more metadata about sequences
|
||||||
|
struct llama_ubatch {
|
||||||
|
bool equal_seqs;
|
||||||
|
// TODO: whole_seqs for embeddings?
|
||||||
|
|
||||||
|
uint32_t n_tokens; // total tokens (n_seq_tokens * n_seqs)
|
||||||
|
uint32_t n_seq_tokens; // tokens per sequence
|
||||||
|
uint32_t n_seqs;
|
||||||
|
|
||||||
|
llama_token * token; // [n_tokens]
|
||||||
|
float * embd; // [n_embd, n_tokens]
|
||||||
|
llama_pos * pos; // [n_tokens]
|
||||||
|
int32_t * n_seq_id; // [n_seqs]
|
||||||
|
llama_seq_id ** seq_id; // [n_seqs]
|
||||||
|
int8_t * output; // [n_tokens]
|
||||||
|
};
|
||||||
|
|
||||||
|
struct llama_sbatch_seq {
|
||||||
|
int32_t n_seq_id;
|
||||||
|
|
||||||
|
llama_seq_id * seq_id;
|
||||||
|
|
||||||
|
size_t offset;
|
||||||
|
size_t length;
|
||||||
|
};
|
||||||
|
|
||||||
|
// sequence-length-aware batch splitting
// Wraps a llama_batch and carves it into llama_ubatch chunks via the split_* methods.
struct llama_sbatch {
    // tokens left in this batch
    size_t n_tokens;

    size_t n_embd; // embedding dimension of the input

    bool logits_all; // TODO: remove once lctx.logits_all is removed too

    // sorted indices into the batch
    std::vector<size_t> ids;
    // batch indices of the output
    std::vector<size_t> out_ids;
    std::vector<llama_sbatch_seq> seq; // per-sequence-set bookkeeping (see llama_sbatch_seq)

    const llama_batch * batch = nullptr; // source batch (raw pointer — presumably non-owning; confirm)

    // buffers for the ubatch
    // (a llama_ubatch's pointers presumably reference these vectors — impl not in view)
    std::vector<llama_token>    ubatch_token;
    std::vector<float>          ubatch_embd;
    std::vector<llama_pos>     ubatch_pos;
    std::vector<int32_t>        ubatch_n_seq_id;
    std::vector<llama_seq_id *> ubatch_seq_id;
    std::vector<int8_t>         ubatch_output;

    // prepare an (initially empty) ubatch with capacity for n_ubatch tokens
    llama_ubatch reserve_ubatch(size_t n_ubatch, bool has_embd = false);

    // move at most `length` tokens of `seq` into `ubatch`
    void add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & seq, size_t length);

    // simple split, unknown number of sequences of unequal lengths
    llama_ubatch split_simple(size_t n_ubatch);

    // make batches of equal-length sequences
    llama_ubatch split_equal(size_t n_ubatch);

    // sequence-wise split
    llama_ubatch split_seq(size_t n_ubatch);

    // (re)initialize this sbatch from `batch`; keeps a pointer to it, so the
    // batch must outlive the sbatch
    void from_batch(const llama_batch & batch, size_t n_embd, bool simple_split = false, bool logits_all = false);
};
|
||||||
|
|
||||||
|
// temporary allocate memory for the input batch if needed
// Owns the backing storage for any fields that the caller-supplied batch is missing.
struct llama_batch_allocr {
    struct llama_batch batch; // the fully-populated batch to hand to the pipeline

    std::array<llama_seq_id, 1> seq_id_0 = { 0 }; // default sequence id
    // backing vectors for fields the input batch may lack (presumably pointed
    // to by `batch` above — impl not in view)
    std::vector<llama_pos>      pos;
    std::vector<int32_t>        n_seq_id;
    std::vector<llama_seq_id *> seq_id;
    std::vector<int8_t>         logits;

    // optionally fulfill the batch returned by llama_batch_get_one
    llama_batch_allocr(struct llama_batch in_batch, llama_pos p0);
};
|
587
examples/talk-llama/llama-chat.cpp
Normal file
587
examples/talk-llama/llama-chat.cpp
Normal file
@ -0,0 +1,587 @@
|
|||||||
|
#include "llama-chat.h"
|
||||||
|
|
||||||
|
#include "llama.h"
|
||||||
|
|
||||||
|
#include <map>
|
||||||
|
#include <sstream>
|
||||||
|
|
||||||
|
#if __cplusplus >= 202000L
|
||||||
|
#define LU8(x) (const char*)(u8##x)
|
||||||
|
#else
|
||||||
|
#define LU8(x) u8##x
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// trim whitespace from the beginning and end of a string
//
// NOTE: the argument to isspace() must be representable as an unsigned char;
// passing a raw (possibly signed) char that holds a negative value — e.g. any
// non-ASCII UTF-8 byte — is undefined behavior, hence the casts below.
static std::string trim(const std::string & str) {
    size_t start = 0;
    size_t end = str.size();
    while (start < end && isspace((unsigned char) str[start])) {
        start += 1;
    }
    while (end > start && isspace((unsigned char) str[end - 1])) {
        end -= 1;
    }
    return str.substr(start, end - start);
}
|
||||||
|
|
||||||
|
// registry mapping each canonical template name to its enum value
// (the keys are also the strings reported by llama_chat_builtin_templates)
static const std::map<std::string, llm_chat_template> LLM_CHAT_TEMPLATES = {
    { "chatml",            LLM_CHAT_TEMPLATE_CHATML            },
    { "llama2",            LLM_CHAT_TEMPLATE_LLAMA_2           },
    { "llama2-sys",        LLM_CHAT_TEMPLATE_LLAMA_2_SYS       },
    { "llama2-sys-bos",    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS   },
    { "llama2-sys-strip",  LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP },
    { "mistral-v1",        LLM_CHAT_TEMPLATE_MISTRAL_V1        },
    { "mistral-v3",        LLM_CHAT_TEMPLATE_MISTRAL_V3        },
    { "mistral-v3-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN },
    { "mistral-v7",        LLM_CHAT_TEMPLATE_MISTRAL_V7        },
    { "phi3",              LLM_CHAT_TEMPLATE_PHI_3             },
    { "phi4",              LLM_CHAT_TEMPLATE_PHI_4             },
    { "falcon3",           LLM_CHAT_TEMPLATE_FALCON_3          },
    { "zephyr",            LLM_CHAT_TEMPLATE_ZEPHYR            },
    { "monarch",           LLM_CHAT_TEMPLATE_MONARCH           },
    { "gemma",             LLM_CHAT_TEMPLATE_GEMMA             },
    { "orion",             LLM_CHAT_TEMPLATE_ORION             },
    { "openchat",          LLM_CHAT_TEMPLATE_OPENCHAT          },
    { "vicuna",            LLM_CHAT_TEMPLATE_VICUNA            },
    { "vicuna-orca",       LLM_CHAT_TEMPLATE_VICUNA_ORCA       },
    { "deepseek",          LLM_CHAT_TEMPLATE_DEEPSEEK          },
    { "deepseek2",         LLM_CHAT_TEMPLATE_DEEPSEEK_2        },
    { "deepseek3",         LLM_CHAT_TEMPLATE_DEEPSEEK_3        },
    { "command-r",         LLM_CHAT_TEMPLATE_COMMAND_R         },
    { "llama3",            LLM_CHAT_TEMPLATE_LLAMA_3           },
    { "chatglm3",          LLM_CHAT_TEMPLATE_CHATGML_3         },
    { "chatglm4",          LLM_CHAT_TEMPLATE_CHATGML_4         },
    { "glmedge",           LLM_CHAT_TEMPLATE_GLMEDGE           },
    { "minicpm",           LLM_CHAT_TEMPLATE_MINICPM           },
    { "exaone3",           LLM_CHAT_TEMPLATE_EXAONE_3          },
    { "rwkv-world",        LLM_CHAT_TEMPLATE_RWKV_WORLD        },
    { "granite",           LLM_CHAT_TEMPLATE_GRANITE           },
    { "gigachat",          LLM_CHAT_TEMPLATE_GIGACHAT          },
    { "megrez",            LLM_CHAT_TEMPLATE_MEGREZ            },
};
|
||||||
|
|
||||||
|
// Exact-name lookup into the built-in template registry.
// Throws std::out_of_range (via map::at) when `name` is not a known template.
llm_chat_template llm_chat_template_from_str(const std::string & name) {
    const auto & registry = LLM_CHAT_TEMPLATES;
    return registry.at(name);
}
|
||||||
|
|
||||||
|
// Heuristically map a Jinja chat-template string (or a canonical registry
// name) to a known llm_chat_template value.
// NOTE: the order of the substring checks below is load-bearing — more
// specific markers are tested before more generic ones. Do not reorder.
llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
    // fast path: the string may be an exact registry name (e.g. "chatml")
    try {
        return llm_chat_template_from_str(tmpl);
    } catch (const std::out_of_range &) {
        // ignore
    }

    // substring test against the full template text
    // (the parameter is the needle despite being named "haystack")
    auto tmpl_contains = [&tmpl](const char * haystack) -> bool {
        return tmpl.find(haystack) != std::string::npos;
    };
    if (tmpl_contains("<|im_start|>")) {
        return tmpl_contains("<|im_sep|>")
            ? LLM_CHAT_TEMPLATE_PHI_4
            : LLM_CHAT_TEMPLATE_CHATML;
    } else if (tmpl.find("mistral") == 0 || tmpl_contains("[INST]")) {
        if (tmpl_contains("[SYSTEM_PROMPT]")) {
            return LLM_CHAT_TEMPLATE_MISTRAL_V7;
        } else if (
            // catches official 'v1' template
            tmpl_contains("' [INST] ' + system_message")
            // catches official 'v3' and 'v3-tekken' templates
            || tmpl_contains("[AVAILABLE_TOOLS]")
        ) {
            // Official mistral 'v1', 'v3' and 'v3-tekken' templates
            // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/chat_templates.md
            // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/templates.md
            if (tmpl_contains(" [INST]")) {
                return LLM_CHAT_TEMPLATE_MISTRAL_V1;
            } else if (tmpl_contains("\"[INST]\"")) {
                return LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN;
            }
            return LLM_CHAT_TEMPLATE_MISTRAL_V3;
        } else {
            // llama2 template and its variants
            // [variant] support system message
            // See: https://huggingface.co/blog/llama2#how-to-prompt-llama-2
            bool support_system_message = tmpl_contains("<<SYS>>");
            bool add_bos_inside_history = tmpl_contains("bos_token + '[INST]");
            bool strip_message = tmpl_contains("content.strip()");
            if (strip_message) {
                return LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP;
            } else if (add_bos_inside_history) {
                return LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS;
            } else if (support_system_message) {
                return LLM_CHAT_TEMPLATE_LLAMA_2_SYS;
            } else {
                return LLM_CHAT_TEMPLATE_LLAMA_2;
            }
        }
    } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) {
        return LLM_CHAT_TEMPLATE_PHI_3;
    } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) {
        return tmpl_contains("</s>") ? LLM_CHAT_TEMPLATE_FALCON_3 : LLM_CHAT_TEMPLATE_GLMEDGE;
    } else if (tmpl_contains("<|user|>") && tmpl_contains("<|endoftext|>")) {
        return LLM_CHAT_TEMPLATE_ZEPHYR;
    } else if (tmpl_contains("bos_token + message['role']")) {
        return LLM_CHAT_TEMPLATE_MONARCH;
    } else if (tmpl_contains("<start_of_turn>")) {
        return LLM_CHAT_TEMPLATE_GEMMA;
    } else if (tmpl_contains("'\\n\\nAssistant: ' + eos_token")) {
        // OrionStarAI/Orion-14B-Chat
        return LLM_CHAT_TEMPLATE_ORION;
    } else if (tmpl_contains("GPT4 Correct ")) {
        // openchat/openchat-3.5-0106
        return LLM_CHAT_TEMPLATE_OPENCHAT;
    } else if (tmpl_contains("USER: ") && tmpl_contains("ASSISTANT: ")) {
        // eachadea/vicuna-13b-1.1 (and Orca variant)
        if (tmpl_contains("SYSTEM: ")) {
            return LLM_CHAT_TEMPLATE_VICUNA_ORCA;
        }
        return LLM_CHAT_TEMPLATE_VICUNA;
    } else if (tmpl_contains("### Instruction:") && tmpl_contains("<|EOT|>")) {
        // deepseek-ai/deepseek-coder-33b-instruct
        return LLM_CHAT_TEMPLATE_DEEPSEEK;
    } else if (tmpl_contains("<|START_OF_TURN_TOKEN|>") && tmpl_contains("<|USER_TOKEN|>")) {
        // CohereForAI/c4ai-command-r-plus
        return LLM_CHAT_TEMPLATE_COMMAND_R;
    } else if (tmpl_contains("<|start_header_id|>") && tmpl_contains("<|end_header_id|>")) {
        return LLM_CHAT_TEMPLATE_LLAMA_3;
    } else if (tmpl_contains("[gMASK]sop")) {
        // chatglm3-6b
        return LLM_CHAT_TEMPLATE_CHATGML_3;
    } else if (tmpl_contains("[gMASK]<sop>")) {
        return LLM_CHAT_TEMPLATE_CHATGML_4;
    } else if (tmpl_contains(LU8("<用户>"))) {
        // MiniCPM-3B-OpenHermes-2.5-v2-GGUF
        return LLM_CHAT_TEMPLATE_MINICPM;
    } else if (tmpl_contains("'Assistant: ' + message['content'] + eos_token")) {
        return LLM_CHAT_TEMPLATE_DEEPSEEK_2;
    } else if (tmpl_contains(LU8("<|Assistant|>")) && tmpl_contains(LU8("<|User|>")) && tmpl_contains(LU8("<|end▁of▁sentence|>"))) {
        return LLM_CHAT_TEMPLATE_DEEPSEEK_3;
    } else if (tmpl_contains("[|system|]") && tmpl_contains("[|assistant|]") && tmpl_contains("[|endofturn|]")) {
        // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
        // EXAONE-3.0-7.8B-Instruct
        return LLM_CHAT_TEMPLATE_EXAONE_3;
    } else if (tmpl_contains("rwkv-world")) {
        return LLM_CHAT_TEMPLATE_RWKV_WORLD;
    } else if (tmpl_contains("<|start_of_role|>")) {
        return LLM_CHAT_TEMPLATE_GRANITE;
    } else if (tmpl_contains("message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1]")) {
        return LLM_CHAT_TEMPLATE_GIGACHAT;
    } else if (tmpl_contains("<|role_start|>")) {
        return LLM_CHAT_TEMPLATE_MEGREZ;
    }
    // no marker matched — caller decides how to handle unknown templates
    return LLM_CHAT_TEMPLATE_UNKNOWN;
}
|
||||||
|
|
||||||
|
// Simple version of "llama_apply_chat_template" that only works with strings
|
||||||
|
// This function uses heuristic checks to determine commonly used template. It is not a jinja parser.
|
||||||
|
int32_t llm_chat_apply_template(
|
||||||
|
llm_chat_template tmpl,
|
||||||
|
const std::vector<const llama_chat_message *> & chat,
|
||||||
|
std::string & dest, bool add_ass) {
|
||||||
|
// Taken from the research: https://github.com/ggerganov/llama.cpp/issues/5527
|
||||||
|
std::stringstream ss;
|
||||||
|
if (tmpl == LLM_CHAT_TEMPLATE_CHATML) {
|
||||||
|
// chatml template
|
||||||
|
for (auto message : chat) {
|
||||||
|
ss << "<|im_start|>" << message->role << "\n" << message->content << "<|im_end|>\n";
|
||||||
|
}
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "<|im_start|>assistant\n";
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V7) {
|
||||||
|
// Official mistral 'v7' template
|
||||||
|
// See: https://huggingface.co/mistralai/Mistral-Large-Instruct-2411#basic-instruct-template-v7
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
std::string content(message->content);
|
||||||
|
if (role == "system") {
|
||||||
|
ss << "[SYSTEM_PROMPT] " << content << "[/SYSTEM_PROMPT]";
|
||||||
|
} else if (role == "user") {
|
||||||
|
ss << "[INST] " << content << "[/INST]";
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
ss << " " << content << "</s>";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V1
|
||||||
|
|| tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3
|
||||||
|
|| tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN) {
|
||||||
|
// See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/chat_templates.md
|
||||||
|
// See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/templates.md
|
||||||
|
std::string leading_space = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V1 ? " " : "";
|
||||||
|
std::string trailing_space = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN ? "" : " ";
|
||||||
|
bool trim_assistant_message = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3;
|
||||||
|
bool is_inside_turn = false;
|
||||||
|
for (auto message : chat) {
|
||||||
|
if (!is_inside_turn) {
|
||||||
|
ss << leading_space << "[INST]" << trailing_space;
|
||||||
|
is_inside_turn = true;
|
||||||
|
}
|
||||||
|
std::string role(message->role);
|
||||||
|
std::string content(message->content);
|
||||||
|
if (role == "system") {
|
||||||
|
ss << content << "\n\n";
|
||||||
|
} else if (role == "user") {
|
||||||
|
ss << content << leading_space << "[/INST]";
|
||||||
|
} else {
|
||||||
|
ss << trailing_space << (trim_assistant_message ? trim(content) : content) << "</s>";
|
||||||
|
is_inside_turn = false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if (
|
||||||
|
tmpl == LLM_CHAT_TEMPLATE_LLAMA_2
|
||||||
|
|| tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS
|
||||||
|
|| tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS
|
||||||
|
|| tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP) {
|
||||||
|
// llama2 template and its variants
|
||||||
|
// [variant] support system message
|
||||||
|
// See: https://huggingface.co/blog/llama2#how-to-prompt-llama-2
|
||||||
|
bool support_system_message = tmpl != LLM_CHAT_TEMPLATE_LLAMA_2;
|
||||||
|
// [variant] add BOS inside history
|
||||||
|
bool add_bos_inside_history = tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS;
|
||||||
|
// [variant] trim spaces from the input message
|
||||||
|
bool strip_message = tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP;
|
||||||
|
// construct the prompt
|
||||||
|
bool is_inside_turn = true; // skip BOS at the beginning
|
||||||
|
ss << "[INST] ";
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string content = strip_message ? trim(message->content) : message->content;
|
||||||
|
std::string role(message->role);
|
||||||
|
if (!is_inside_turn) {
|
||||||
|
is_inside_turn = true;
|
||||||
|
ss << (add_bos_inside_history ? "<s>[INST] " : "[INST] ");
|
||||||
|
}
|
||||||
|
if (role == "system") {
|
||||||
|
if (support_system_message) {
|
||||||
|
ss << "<<SYS>>\n" << content << "\n<</SYS>>\n\n";
|
||||||
|
} else {
|
||||||
|
// if the model does not support system message, we still include it in the first message, but without <<SYS>>
|
||||||
|
ss << content << "\n";
|
||||||
|
}
|
||||||
|
} else if (role == "user") {
|
||||||
|
ss << content << " [/INST]";
|
||||||
|
} else {
|
||||||
|
ss << content << "</s>";
|
||||||
|
is_inside_turn = false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_PHI_3) {
|
||||||
|
// Phi 3
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
ss << "<|" << role << "|>\n" << message->content << "<|end|>\n";
|
||||||
|
}
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "<|assistant|>\n";
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_PHI_4) {
|
||||||
|
// chatml template
|
||||||
|
for (auto message : chat) {
|
||||||
|
ss << "<|im_start|>" << message->role << "<|im_sep|>" << message->content << "<|im_end|>";
|
||||||
|
}
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "<|im_start|>assistant<|im_sep|>";
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_FALCON_3) {
|
||||||
|
// Falcon 3
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
ss << "<|" << role << "|>\n" << message->content << "\n";
|
||||||
|
}
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "<|assistant|>\n";
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_ZEPHYR) {
|
||||||
|
// zephyr template
|
||||||
|
for (auto message : chat) {
|
||||||
|
ss << "<|" << message->role << "|>" << "\n" << message->content << "<|endoftext|>\n";
|
||||||
|
}
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "<|assistant|>\n";
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_MONARCH) {
|
||||||
|
// mlabonne/AlphaMonarch-7B template (the <s> is included inside history)
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string bos = (message == chat.front()) ? "" : "<s>"; // skip BOS for first message
|
||||||
|
ss << bos << message->role << "\n" << message->content << "</s>\n";
|
||||||
|
}
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "<s>assistant\n";
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_GEMMA) {
|
||||||
|
// google/gemma-7b-it
|
||||||
|
std::string system_prompt = "";
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
if (role == "system") {
|
||||||
|
// there is no system message for gemma, but we will merge it with user prompt, so nothing is broken
|
||||||
|
system_prompt = trim(message->content);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
// in gemma, "assistant" is "model"
|
||||||
|
role = role == "assistant" ? "model" : message->role;
|
||||||
|
ss << "<start_of_turn>" << role << "\n";
|
||||||
|
if (!system_prompt.empty() && role != "model") {
|
||||||
|
ss << system_prompt << "\n\n";
|
||||||
|
system_prompt = "";
|
||||||
|
}
|
||||||
|
ss << trim(message->content) << "<end_of_turn>\n";
|
||||||
|
}
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "<start_of_turn>model\n";
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_ORION) {
|
||||||
|
// OrionStarAI/Orion-14B-Chat
|
||||||
|
std::string system_prompt = "";
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
if (role == "system") {
|
||||||
|
// there is no system message support, we will merge it with user prompt
|
||||||
|
system_prompt = message->content;
|
||||||
|
continue;
|
||||||
|
} else if (role == "user") {
|
||||||
|
ss << "Human: ";
|
||||||
|
if (!system_prompt.empty()) {
|
||||||
|
ss << system_prompt << "\n\n";
|
||||||
|
system_prompt = "";
|
||||||
|
}
|
||||||
|
ss << message->content << "\n\nAssistant: </s>";
|
||||||
|
} else {
|
||||||
|
ss << message->content << "</s>";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_OPENCHAT) {
|
||||||
|
// openchat/openchat-3.5-0106,
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
if (role == "system") {
|
||||||
|
ss << message->content << "<|end_of_turn|>";
|
||||||
|
} else {
|
||||||
|
role[0] = toupper(role[0]);
|
||||||
|
ss << "GPT4 Correct " << role << ": " << message->content << "<|end_of_turn|>";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "GPT4 Correct Assistant:";
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_VICUNA || tmpl == LLM_CHAT_TEMPLATE_VICUNA_ORCA) {
|
||||||
|
// eachadea/vicuna-13b-1.1 (and Orca variant)
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
if (role == "system") {
|
||||||
|
// Orca-Vicuna variant uses a system prefix
|
||||||
|
if (tmpl == LLM_CHAT_TEMPLATE_VICUNA_ORCA) {
|
||||||
|
ss << "SYSTEM: " << message->content << "\n";
|
||||||
|
} else {
|
||||||
|
ss << message->content << "\n\n";
|
||||||
|
}
|
||||||
|
} else if (role == "user") {
|
||||||
|
ss << "USER: " << message->content << "\n";
|
||||||
|
} else if (role == "assistant") {
|
||||||
|
ss << "ASSISTANT: " << message->content << "</s>\n";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "ASSISTANT:";
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK) {
|
||||||
|
// deepseek-ai/deepseek-coder-33b-instruct
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
if (role == "system") {
|
||||||
|
ss << message->content;
|
||||||
|
} else if (role == "user") {
|
||||||
|
ss << "### Instruction:\n" << message->content << "\n";
|
||||||
|
} else if (role == "assistant") {
|
||||||
|
ss << "### Response:\n" << message->content << "\n<|EOT|>\n";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "### Response:\n";
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_COMMAND_R) {
|
||||||
|
// CohereForAI/c4ai-command-r-plus
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
if (role == "system") {
|
||||||
|
ss << "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
|
||||||
|
} else if (role == "user") {
|
||||||
|
ss << "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
|
||||||
|
} else if (role == "assistant") {
|
||||||
|
ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>";
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_LLAMA_3) {
|
||||||
|
// Llama 3
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
ss << "<|start_header_id|>" << role << "<|end_header_id|>\n\n" << trim(message->content) << "<|eot_id|>";
|
||||||
|
}
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "<|start_header_id|>assistant<|end_header_id|>\n\n";
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_3) {
|
||||||
|
// chatglm3-6b
|
||||||
|
ss << "[gMASK]" << "sop";
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
ss << "<|" << role << "|>" << "\n " << message->content;
|
||||||
|
}
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "<|assistant|>";
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_4) {
|
||||||
|
ss << "[gMASK]" << "<sop>";
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
ss << "<|" << role << "|>" << "\n" << message->content;
|
||||||
|
}
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "<|assistant|>";
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_GLMEDGE) {
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
ss << "<|" << role << "|>" << "\n" << message->content;
|
||||||
|
}
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "<|assistant|>";
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_MINICPM) {
|
||||||
|
// MiniCPM-3B-OpenHermes-2.5-v2-GGUF
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
if (role == "user") {
|
||||||
|
ss << LU8("<用户>");
|
||||||
|
ss << trim(message->content);
|
||||||
|
ss << "<AI>";
|
||||||
|
} else {
|
||||||
|
ss << trim(message->content);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK_2) {
|
||||||
|
// DeepSeek-V2
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
if (role == "system") {
|
||||||
|
ss << message->content << "\n\n";
|
||||||
|
} else if (role == "user") {
|
||||||
|
ss << "User: " << message->content << "\n\n";
|
||||||
|
} else if (role == "assistant") {
|
||||||
|
ss << "Assistant: " << message->content << LU8("<|end▁of▁sentence|>");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "Assistant:";
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK_3) {
|
||||||
|
// DeepSeek-V3
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
if (role == "system") {
|
||||||
|
ss << message->content << "\n\n";
|
||||||
|
} else if (role == "user") {
|
||||||
|
ss << LU8("<|User|>") << message->content;
|
||||||
|
} else if (role == "assistant") {
|
||||||
|
ss << LU8("<|Assistant|>") << message->content << LU8("<|end▁of▁sentence|>");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (add_ass) {
|
||||||
|
ss << LU8("<|Assistant|>");
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_EXAONE_3) {
|
||||||
|
// ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
|
||||||
|
// EXAONE-3.0-7.8B-Instruct
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
if (role == "system") {
|
||||||
|
ss << "[|system|]" << trim(message->content) << "[|endofturn|]\n";
|
||||||
|
} else if (role == "user") {
|
||||||
|
ss << "[|user|]" << trim(message->content) << "\n";
|
||||||
|
} else if (role == "assistant") {
|
||||||
|
ss << "[|assistant|]" << trim(message->content) << "[|endofturn|]\n";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "[|assistant|]";
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_RWKV_WORLD) {
|
||||||
|
// this template requires the model to have "\n\n" as EOT token
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
if (role == "user") {
|
||||||
|
ss << "User: " << message->content << "\n\nAssistant:";
|
||||||
|
} else {
|
||||||
|
ss << message->content << "\n\n";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_GRANITE) {
|
||||||
|
// IBM Granite template
|
||||||
|
for (const auto & message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
ss << "<|start_of_role|>" << role << "<|end_of_role|>";
|
||||||
|
if (role == "assistant_tool_call") {
|
||||||
|
ss << "<|tool_call|>";
|
||||||
|
}
|
||||||
|
ss << message->content << "<|end_of_text|>\n";
|
||||||
|
}
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "<|start_of_role|>assistant<|end_of_role|>\n";
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_GIGACHAT) {
|
||||||
|
// GigaChat template
|
||||||
|
bool has_system = !chat.empty() && std::string(chat[0]->role) == "system";
|
||||||
|
|
||||||
|
// Handle system message if present
|
||||||
|
if (has_system) {
|
||||||
|
ss << "<s>" << chat[0]->content << "<|message_sep|>";
|
||||||
|
} else {
|
||||||
|
ss << "<s>";
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process remaining messages
|
||||||
|
for (size_t i = has_system ? 1 : 0; i < chat.size(); i++) {
|
||||||
|
std::string role(chat[i]->role);
|
||||||
|
if (role == "user") {
|
||||||
|
ss << "user<|role_sep|>" << chat[i]->content << "<|message_sep|>"
|
||||||
|
<< "available functions<|role_sep|>[]<|message_sep|>";
|
||||||
|
} else if (role == "assistant") {
|
||||||
|
ss << "assistant<|role_sep|>" << chat[i]->content << "<|message_sep|>";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add generation prompt if needed
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "assistant<|role_sep|>";
|
||||||
|
}
|
||||||
|
} else if (tmpl == LLM_CHAT_TEMPLATE_MEGREZ) {
|
||||||
|
// Megrez template
|
||||||
|
for (auto message : chat) {
|
||||||
|
std::string role(message->role);
|
||||||
|
ss << "<|role_start|>" << role << "<|role_end|>" << message->content << "<|turn_end|>";
|
||||||
|
}
|
||||||
|
|
||||||
|
if (add_ass) {
|
||||||
|
ss << "<|role_start|>assistant<|role_end|>";
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// template not supported
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
dest = ss.str();
|
||||||
|
return dest.size();
|
||||||
|
}
|
||||||
|
|
||||||
|
// public interface
|
||||||
|
|
||||||
|
int32_t llama_chat_builtin_templates(const char ** output, size_t len) {
|
||||||
|
auto it = LLM_CHAT_TEMPLATES.begin();
|
||||||
|
for (size_t i = 0; i < std::min(len, LLM_CHAT_TEMPLATES.size()); i++) {
|
||||||
|
output[i] = it->first.c_str();
|
||||||
|
std::advance(it, 1);
|
||||||
|
}
|
||||||
|
return (int32_t) LLM_CHAT_TEMPLATES.size();
|
||||||
|
}
|
||||||
|
|
53
examples/talk-llama/llama-chat.h
Normal file
53
examples/talk-llama/llama-chat.h
Normal file
@ -0,0 +1,53 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
#include <cstdint>
|
||||||
|
|
||||||
|
// known chat-template formats
// NOTE: the enumerator order defines the underlying integer values — do not reorder
enum llm_chat_template {
    LLM_CHAT_TEMPLATE_CHATML,
    LLM_CHAT_TEMPLATE_LLAMA_2,
    LLM_CHAT_TEMPLATE_LLAMA_2_SYS,
    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS,
    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP,
    LLM_CHAT_TEMPLATE_MISTRAL_V1,
    LLM_CHAT_TEMPLATE_MISTRAL_V3,
    LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN,
    LLM_CHAT_TEMPLATE_MISTRAL_V7,
    LLM_CHAT_TEMPLATE_PHI_3,
    LLM_CHAT_TEMPLATE_PHI_4,
    LLM_CHAT_TEMPLATE_FALCON_3,
    LLM_CHAT_TEMPLATE_ZEPHYR,
    LLM_CHAT_TEMPLATE_MONARCH,
    LLM_CHAT_TEMPLATE_GEMMA,
    LLM_CHAT_TEMPLATE_ORION,
    LLM_CHAT_TEMPLATE_OPENCHAT,
    LLM_CHAT_TEMPLATE_VICUNA,
    LLM_CHAT_TEMPLATE_VICUNA_ORCA,
    LLM_CHAT_TEMPLATE_DEEPSEEK,
    LLM_CHAT_TEMPLATE_DEEPSEEK_2,
    LLM_CHAT_TEMPLATE_DEEPSEEK_3,
    LLM_CHAT_TEMPLATE_COMMAND_R,
    LLM_CHAT_TEMPLATE_LLAMA_3,
    LLM_CHAT_TEMPLATE_CHATGML_3,
    LLM_CHAT_TEMPLATE_CHATGML_4,
    LLM_CHAT_TEMPLATE_GLMEDGE,
    LLM_CHAT_TEMPLATE_MINICPM,
    LLM_CHAT_TEMPLATE_EXAONE_3,
    LLM_CHAT_TEMPLATE_RWKV_WORLD,
    LLM_CHAT_TEMPLATE_GRANITE,
    LLM_CHAT_TEMPLATE_GIGACHAT,
    LLM_CHAT_TEMPLATE_MEGREZ,
    LLM_CHAT_TEMPLATE_UNKNOWN,
};

struct llama_chat_message;

// exact-name lookup into the built-in registry; throws std::out_of_range for unknown names
llm_chat_template llm_chat_template_from_str(const std::string & name);

// heuristically detect the template from a Jinja template string (or registry name);
// returns LLM_CHAT_TEMPLATE_UNKNOWN when nothing matches
llm_chat_template llm_chat_detect_template(const std::string & tmpl);

// render `chat` into `dest` using `tmpl`; returns the rendered length, or -1 if unsupported
int32_t llm_chat_apply_template(
    llm_chat_template tmpl,
    const std::vector<const llama_chat_message *> & chat,
    std::string & dest, bool add_ass);
|
1775
examples/talk-llama/llama-context.cpp
Normal file
1775
examples/talk-llama/llama-context.cpp
Normal file
File diff suppressed because it is too large
Load Diff
128
examples/talk-llama/llama-context.h
Normal file
128
examples/talk-llama/llama-context.h
Normal file
@ -0,0 +1,128 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "llama.h"
|
||||||
|
#include "llama-batch.h"
|
||||||
|
#include "llama-cparams.h"
|
||||||
|
#include "llama-model.h"
|
||||||
|
#include "llama-kv-cache.h"
|
||||||
|
#include "llama-adapter.h"
|
||||||
|
|
||||||
|
#include "ggml-cpp.h"
|
||||||
|
|
||||||
|
#include <map>
|
||||||
|
#include <unordered_map>
|
||||||
|
#include <vector>
|
||||||
|
#include <set>
|
||||||
|
|
||||||
|
struct llama_context {
|
||||||
|
llama_context(const llama_model & model)
|
||||||
|
: model(model)
|
||||||
|
, t_start_us(model.t_start_us)
|
||||||
|
, t_load_us(model.t_load_us) {}
|
||||||
|
|
||||||
|
const struct llama_model & model;
|
||||||
|
|
||||||
|
struct llama_cparams cparams;
|
||||||
|
struct llama_sbatch sbatch; // TODO: revisit if needed
|
||||||
|
struct llama_kv_cache kv_self;
|
||||||
|
struct llama_adapter_cvec cvec;
|
||||||
|
|
||||||
|
std::unordered_map<struct llama_adapter_lora *, float> lora;
|
||||||
|
|
||||||
|
std::vector<ggml_backend_ptr> backends;
|
||||||
|
std::vector<std::pair<ggml_backend_t, ggml_backend_set_n_threads_t>> set_n_threads_fns;
|
||||||
|
|
||||||
|
ggml_backend_t backend_cpu = nullptr;
|
||||||
|
|
||||||
|
ggml_threadpool_t threadpool = nullptr;
|
||||||
|
ggml_threadpool_t threadpool_batch = nullptr;
|
||||||
|
|
||||||
|
bool has_evaluated_once = false;
|
||||||
|
|
||||||
|
mutable int64_t t_start_us;
|
||||||
|
mutable int64_t t_load_us;
|
||||||
|
mutable int64_t t_p_eval_us = 0;
|
||||||
|
mutable int64_t t_eval_us = 0;
|
||||||
|
|
||||||
|
mutable int64_t t_compute_start_us = 0;
|
||||||
|
mutable int64_t n_queued_tokens = 0;
|
||||||
|
|
||||||
|
mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
|
||||||
|
mutable int32_t n_eval = 0; // number of eval calls
|
||||||
|
|
||||||
|
// host buffer for the model output (logits and embeddings)
|
||||||
|
ggml_backend_buffer_ptr buf_output;
|
||||||
|
|
||||||
|
// decode output (2-dimensional array: [n_outputs][n_vocab])
|
||||||
|
size_t logits_size = 0; // capacity (of floats) for logits
|
||||||
|
float * logits = nullptr;
|
||||||
|
|
||||||
|
std::vector<int32_t> output_ids; // map batch token positions to ids of the logits and embd buffers
|
||||||
|
size_t output_size = 0; // capacity (of tokens positions) for the output buffers
|
||||||
|
int32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch
|
||||||
|
|
||||||
|
bool logits_all = false;
|
||||||
|
|
||||||
|
// embeddings output (2-dimensional array: [n_outputs][n_embd])
|
||||||
|
// populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
|
||||||
|
size_t embd_size = 0; // capacity (of floats) for embeddings
|
||||||
|
float * embd = nullptr;
|
||||||
|
|
||||||
|
// sequence embeddings output (map of [n_embd] vectors)
|
||||||
|
// populated only when pooling_type != LLAMA_POOLING_TYPE_NONE
|
||||||
|
std::map<llama_seq_id, std::vector<float>> embd_seq;
|
||||||
|
|
||||||
|
// whether we are computing encoder output or decoder output
|
||||||
|
bool is_encoding = false;
|
||||||
|
|
||||||
|
// TODO: find a better way to accommodate mutli-dimension position encoding methods
|
||||||
|
// number of position id each token get, 1 for each token in most cases.
|
||||||
|
// when using m-rope, it will be 3 position ids per token to representing 3 dimension coordinate.
|
||||||
|
int n_pos_per_token = 1;
|
||||||
|
|
||||||
|
// output of the encoder part of the encoder-decoder models
|
||||||
|
std::vector<float> embd_enc;
|
||||||
|
std::vector<std::set<llama_seq_id>> seq_ids_enc;
|
||||||
|
|
||||||
|
// memory buffers used to evaluate the model
|
||||||
|
std::vector<uint8_t> buf_compute_meta;
|
||||||
|
ggml_backend_sched_ptr sched;
|
||||||
|
|
||||||
|
ggml_abort_callback abort_callback = nullptr;
|
||||||
|
void * abort_callback_data = nullptr;
|
||||||
|
|
||||||
|
// input tensors
|
||||||
|
struct ggml_tensor * inp_tokens; // I32 [n_batch]
|
||||||
|
struct ggml_tensor * inp_embd; // F32 [n_embd, n_batch]
|
||||||
|
struct ggml_tensor * inp_pos; // I32 [n_batch]
|
||||||
|
struct ggml_tensor * inp_out_ids; // I32 [n_outputs]
|
||||||
|
struct ggml_tensor * inp_KQ_mask; // F32 [kv_size, n_batch]
|
||||||
|
struct ggml_tensor * inp_KQ_mask_swa; // F32 [kv_size, n_batch]
|
||||||
|
struct ggml_tensor * inp_K_shift; // I32 [kv_size]
|
||||||
|
struct ggml_tensor * inp_mean; // F32 [n_batch, n_batch]
|
||||||
|
struct ggml_tensor * inp_cls; // I32 [n_batch]
|
||||||
|
struct ggml_tensor * inp_s_copy; // I32 [kv_size]
|
||||||
|
struct ggml_tensor * inp_s_mask; // F32 [1, n_kv]
|
||||||
|
struct ggml_tensor * inp_s_seq; // I32 [n_kv, n_batch]
|
||||||
|
struct ggml_tensor * inp_pos_bucket; // I32 [n_batch|n_kv, n_batch]
|
||||||
|
struct ggml_tensor * inp_embd_enc; // F32 [n_embd, n_outputs_enc]
|
||||||
|
struct ggml_tensor * inp_KQ_mask_cross; // F32 [n_outputs_enc, n_batch]
|
||||||
|
};
|
||||||
|
|
||||||
|
// TODO: make these methods of llama_context
|
||||||
|
void llama_set_k_shift(struct llama_context & lctx);
|
||||||
|
|
||||||
|
void llama_set_s_copy(struct llama_context & lctx);
|
||||||
|
|
||||||
|
void llama_set_inputs(llama_context & lctx, const llama_ubatch & ubatch);
|
||||||
|
|
||||||
|
// Make sure enough space is available for outputs.
|
||||||
|
// Returns max number of outputs for which space was reserved.
|
||||||
|
size_t llama_output_reserve(struct llama_context & lctx, size_t n_outputs);
|
||||||
|
|
||||||
|
// make the outputs have the same order they had in the user-provided batch
|
||||||
|
void llama_output_reorder(struct llama_context & ctx);
|
||||||
|
|
||||||
|
// For internal test use
|
||||||
|
// TODO: remove
|
||||||
|
const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(struct llama_context * ctx);
|
1
examples/talk-llama/llama-cparams.cpp
Normal file
1
examples/talk-llama/llama-cparams.cpp
Normal file
@ -0,0 +1 @@
|
|||||||
|
#include "llama-cparams.h"
|
37
examples/talk-llama/llama-cparams.h
Normal file
37
examples/talk-llama/llama-cparams.h
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "llama.h"
|
||||||
|
|
||||||
|
#include <cstdint>
|
||||||
|
|
||||||
|
struct llama_cparams {
|
||||||
|
uint32_t n_ctx; // context size used during inference
|
||||||
|
uint32_t n_batch;
|
||||||
|
uint32_t n_ubatch;
|
||||||
|
uint32_t n_seq_max;
|
||||||
|
int n_threads; // number of threads to use for generation
|
||||||
|
int n_threads_batch; // number of threads to use for batch processing
|
||||||
|
|
||||||
|
float rope_freq_base;
|
||||||
|
float rope_freq_scale;
|
||||||
|
|
||||||
|
uint32_t n_ctx_orig_yarn;
|
||||||
|
// These hyperparameters are not exposed in GGUF, because all
|
||||||
|
// existing YaRN models use the same values for them.
|
||||||
|
float yarn_ext_factor;
|
||||||
|
float yarn_attn_factor;
|
||||||
|
float yarn_beta_fast;
|
||||||
|
float yarn_beta_slow;
|
||||||
|
float defrag_thold;
|
||||||
|
|
||||||
|
bool embeddings;
|
||||||
|
bool causal_attn;
|
||||||
|
bool offload_kqv;
|
||||||
|
bool flash_attn;
|
||||||
|
bool no_perf;
|
||||||
|
|
||||||
|
enum llama_pooling_type pooling_type;
|
||||||
|
|
||||||
|
ggml_backend_sched_eval_callback cb_eval;
|
||||||
|
void * cb_eval_user_data;
|
||||||
|
};
|
@ -1,5 +1,6 @@
|
|||||||
#include "llama-grammar.h"
|
#include "llama-grammar.h"
|
||||||
|
|
||||||
|
#include "llama-impl.h"
|
||||||
#include "llama-vocab.h"
|
#include "llama-vocab.h"
|
||||||
#include "llama-sampling.h"
|
#include "llama-sampling.h"
|
||||||
|
|
||||||
@ -559,7 +560,7 @@ bool llama_grammar_parser::parse(const char * src) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
} catch (const std::exception & err) {
|
} catch (const std::exception & err) {
|
||||||
fprintf(stderr, "%s: error parsing grammar: %s\n", __func__, err.what());
|
fprintf(stderr, "%s: error parsing grammar: %s\n\n%s\n", __func__, err.what(), src);
|
||||||
rules.clear();
|
rules.clear();
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
@ -822,15 +823,11 @@ llama_grammar_stacks & llama_grammar_get_stacks(struct llama_grammar * grammar)
|
|||||||
return grammar->stacks;
|
return grammar->stacks;
|
||||||
}
|
}
|
||||||
|
|
||||||
void llama_grammar_accept(
|
void llama_grammar_accept(struct llama_grammar * grammar, uint32_t chr) {
|
||||||
const llama_grammar_rules & rules,
|
llama_grammar_stacks stacks_new;
|
||||||
const llama_grammar_stacks & stacks,
|
stacks_new.reserve(grammar->stacks.size());
|
||||||
const uint32_t chr,
|
|
||||||
llama_grammar_stacks & stacks_new) {
|
|
||||||
stacks_new.clear();
|
|
||||||
stacks_new.reserve(stacks.size());
|
|
||||||
|
|
||||||
for (const auto & stack : stacks) {
|
for (const auto & stack : grammar->stacks) {
|
||||||
if (stack.empty()) {
|
if (stack.empty()) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
@ -844,9 +841,11 @@ void llama_grammar_accept(
|
|||||||
if (!llama_grammar_is_end_of_sequence(pos)) {
|
if (!llama_grammar_is_end_of_sequence(pos)) {
|
||||||
new_stack.push_back(pos);
|
new_stack.push_back(pos);
|
||||||
}
|
}
|
||||||
llama_grammar_advance_stack(rules, new_stack, stacks_new);
|
llama_grammar_advance_stack(grammar->rules, new_stack, stacks_new);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
grammar->stacks = std::move(stacks_new);
|
||||||
}
|
}
|
||||||
|
|
||||||
llama_grammar_candidates llama_grammar_reject_candidates_for_stack(
|
llama_grammar_candidates llama_grammar_reject_candidates_for_stack(
|
||||||
@ -961,10 +960,28 @@ struct llama_grammar * llama_grammar_init_impl(
|
|||||||
// Important: vec_rules has to be moved here, not copied, because stacks contains
|
// Important: vec_rules has to be moved here, not copied, because stacks contains
|
||||||
// pointers to elements of vec_rules. If vec_rules were copied into llama_grammar
|
// pointers to elements of vec_rules. If vec_rules were copied into llama_grammar
|
||||||
// then the pointers would be invalidated when the local vec_rules goes out of scope.
|
// then the pointers would be invalidated when the local vec_rules goes out of scope.
|
||||||
return new llama_grammar { vocab, std::move(vec_rules), std::move(stacks), {}, };
|
return new llama_grammar {
|
||||||
|
vocab,
|
||||||
|
std::move(vec_rules),
|
||||||
|
std::move(stacks),
|
||||||
|
/* .partial_utf8 = */ {},
|
||||||
|
/* .lazy =*/ false,
|
||||||
|
/* .awaiting_trigger = */ false,
|
||||||
|
/* .trigger_buffer = */ "",
|
||||||
|
/* .trigger_tokens = */ {},
|
||||||
|
/* .trigger_words = */ {},
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
struct llama_grammar * llama_grammar_init_impl(const struct llama_vocab * vocab, const char * grammar_str, const char * grammar_root) {
|
struct llama_grammar * llama_grammar_init_impl(
|
||||||
|
const struct llama_vocab * vocab,
|
||||||
|
const char * grammar_str,
|
||||||
|
const char * grammar_root,
|
||||||
|
bool lazy,
|
||||||
|
const char ** trigger_words,
|
||||||
|
size_t num_trigger_words,
|
||||||
|
const llama_token * trigger_tokens,
|
||||||
|
size_t num_trigger_tokens) {
|
||||||
llama_grammar_parser parser;
|
llama_grammar_parser parser;
|
||||||
|
|
||||||
// if there is a grammar, parse it
|
// if there is a grammar, parse it
|
||||||
@ -1036,10 +1053,31 @@ struct llama_grammar * llama_grammar_init_impl(const struct llama_vocab * vocab,
|
|||||||
}
|
}
|
||||||
} while (true);
|
} while (true);
|
||||||
|
|
||||||
|
std::vector<llama_token> vec_trigger_tokens;
|
||||||
|
std::vector<std::string> vec_trigger_words;
|
||||||
|
for (size_t i = 0; i < num_trigger_tokens; i++) {
|
||||||
|
GGML_ASSERT(trigger_tokens != nullptr);
|
||||||
|
vec_trigger_tokens.push_back(trigger_tokens[i]);
|
||||||
|
}
|
||||||
|
for (size_t i = 0; i < num_trigger_words; i++) {
|
||||||
|
GGML_ASSERT(trigger_words != nullptr);
|
||||||
|
vec_trigger_words.push_back(trigger_words[i]);
|
||||||
|
}
|
||||||
|
|
||||||
// Important: vec_rules has to be moved here, not copied, because stacks contains
|
// Important: vec_rules has to be moved here, not copied, because stacks contains
|
||||||
// pointers to elements of vec_rules. If vec_rules were copied into llama_grammar
|
// pointers to elements of vec_rules. If vec_rules were copied into llama_grammar
|
||||||
// then the pointers would be invalidated when the local vec_rules goes out of scope.
|
// then the pointers would be invalidated when the local vec_rules goes out of scope.
|
||||||
return new llama_grammar { vocab, std::move(vec_rules), std::move(stacks), {}, };
|
return new llama_grammar {
|
||||||
|
vocab,
|
||||||
|
std::move(vec_rules),
|
||||||
|
std::move(stacks),
|
||||||
|
/* .partial_utf8 = */ {},
|
||||||
|
/* .lazy = */ lazy,
|
||||||
|
/* .awaiting_trigger = */ lazy,
|
||||||
|
/* .trigger_buffer = */ "",
|
||||||
|
std::move(vec_trigger_tokens),
|
||||||
|
std::move(vec_trigger_words),
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
void llama_grammar_free_impl(struct llama_grammar * grammar) {
|
void llama_grammar_free_impl(struct llama_grammar * grammar) {
|
||||||
@ -1051,7 +1089,17 @@ void llama_grammar_free_impl(struct llama_grammar * grammar) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
struct llama_grammar * llama_grammar_clone_impl(const struct llama_grammar & grammar) {
|
struct llama_grammar * llama_grammar_clone_impl(const struct llama_grammar & grammar) {
|
||||||
llama_grammar * result = new llama_grammar { grammar.vocab, grammar.rules, grammar.stacks, grammar.partial_utf8, };
|
llama_grammar * result = new llama_grammar {
|
||||||
|
grammar.vocab,
|
||||||
|
grammar.rules,
|
||||||
|
grammar.stacks,
|
||||||
|
grammar.partial_utf8,
|
||||||
|
grammar.lazy,
|
||||||
|
grammar.awaiting_trigger,
|
||||||
|
grammar.trigger_buffer,
|
||||||
|
grammar.trigger_tokens,
|
||||||
|
grammar.trigger_words,
|
||||||
|
};
|
||||||
|
|
||||||
// redirect elements in stacks to point to new rules
|
// redirect elements in stacks to point to new rules
|
||||||
for (size_t is = 0; is < result->stacks.size(); is++) {
|
for (size_t is = 0; is < result->stacks.size(); is++) {
|
||||||
@ -1059,7 +1107,7 @@ struct llama_grammar * llama_grammar_clone_impl(const struct llama_grammar & gra
|
|||||||
for (size_t ir0 = 0; ir0 < grammar.rules.size(); ir0++) {
|
for (size_t ir0 = 0; ir0 < grammar.rules.size(); ir0++) {
|
||||||
for (size_t ir1 = 0; ir1 < grammar.rules[ir0].size(); ir1++) {
|
for (size_t ir1 = 0; ir1 < grammar.rules[ir0].size(); ir1++) {
|
||||||
if (grammar.stacks[is][ie] == &grammar.rules[ir0][ir1]) {
|
if (grammar.stacks[is][ie] == &grammar.rules[ir0][ir1]) {
|
||||||
result->stacks[is][ie] = &result->rules[ir0][ir1];
|
result->stacks[is][ie] = &result->rules[ir0][ir1];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1072,6 +1120,10 @@ struct llama_grammar * llama_grammar_clone_impl(const struct llama_grammar & gra
|
|||||||
void llama_grammar_apply_impl(const struct llama_grammar & grammar, llama_token_data_array * cur_p) {
|
void llama_grammar_apply_impl(const struct llama_grammar & grammar, llama_token_data_array * cur_p) {
|
||||||
GGML_ASSERT(grammar.vocab != nullptr);
|
GGML_ASSERT(grammar.vocab != nullptr);
|
||||||
|
|
||||||
|
if (grammar.awaiting_trigger) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
bool allow_eog = false;
|
bool allow_eog = false;
|
||||||
for (const auto & stack : grammar.stacks) {
|
for (const auto & stack : grammar.stacks) {
|
||||||
if (stack.empty()) {
|
if (stack.empty()) {
|
||||||
@ -1088,9 +1140,9 @@ void llama_grammar_apply_impl(const struct llama_grammar & grammar, llama_token_
|
|||||||
|
|
||||||
for (size_t i = 0; i < cur_p->size; ++i) {
|
for (size_t i = 0; i < cur_p->size; ++i) {
|
||||||
const llama_token id = cur_p->data[i].id;
|
const llama_token id = cur_p->data[i].id;
|
||||||
const std::string & piece = grammar.vocab->cache_token_to_piece.at(id);
|
const std::string & piece = grammar.vocab->token_to_piece(id);
|
||||||
|
|
||||||
if (llama_token_is_eog_impl(*grammar.vocab, id)) {
|
if (grammar.vocab->is_eog(id)) {
|
||||||
if (!allow_eog) {
|
if (!allow_eog) {
|
||||||
cur_p->data[i].logit = -INFINITY;
|
cur_p->data[i].logit = -INFINITY;
|
||||||
}
|
}
|
||||||
@ -1111,7 +1163,35 @@ void llama_grammar_apply_impl(const struct llama_grammar & grammar, llama_token_
|
|||||||
void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token) {
|
void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token) {
|
||||||
GGML_ASSERT(grammar.vocab != nullptr);
|
GGML_ASSERT(grammar.vocab != nullptr);
|
||||||
|
|
||||||
if (llama_token_is_eog_impl(*grammar.vocab, token)) {
|
const auto & piece = grammar.vocab->token_to_piece(token);
|
||||||
|
|
||||||
|
if (grammar.awaiting_trigger) {
|
||||||
|
if (std::find(grammar.trigger_tokens.begin(), grammar.trigger_tokens.end(), token) != grammar.trigger_tokens.end()) {
|
||||||
|
grammar.awaiting_trigger = false;
|
||||||
|
grammar.trigger_buffer.clear();
|
||||||
|
llama_grammar_accept_str(grammar, piece);
|
||||||
|
LLAMA_LOG_DEBUG("Grammar triggered on token %u (`%s`)", token, piece.c_str());
|
||||||
|
return;
|
||||||
|
} else {
|
||||||
|
// TODO: consider a smarter incremental substring search algorithm (store last position to search from).
|
||||||
|
grammar.trigger_buffer += piece;
|
||||||
|
for (const auto & word : grammar.trigger_words) {
|
||||||
|
auto pos = grammar.trigger_buffer.find(word);
|
||||||
|
if (pos != std::string::npos) {
|
||||||
|
grammar.awaiting_trigger = false;
|
||||||
|
auto constrained_str = grammar.trigger_buffer.substr(pos);
|
||||||
|
grammar.trigger_buffer.clear();
|
||||||
|
llama_grammar_accept_str(grammar, constrained_str);
|
||||||
|
LLAMA_LOG_DEBUG("Grammar triggered on word `%s`", word.c_str());
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
LLAMA_LOG_DEBUG("Grammar still awaiting trigger after token %d (`%s`) (buffer: `%s`)\n", token, piece.c_str(), grammar.trigger_buffer.c_str());
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (grammar.vocab->is_eog(token)) {
|
||||||
for (const auto & stack : grammar.stacks) {
|
for (const auto & stack : grammar.stacks) {
|
||||||
if (stack.empty()) {
|
if (stack.empty()) {
|
||||||
return;
|
return;
|
||||||
@ -1120,19 +1200,20 @@ void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token
|
|||||||
GGML_ABORT("fatal error");
|
GGML_ABORT("fatal error");
|
||||||
}
|
}
|
||||||
|
|
||||||
const std::string & piece = grammar.vocab->cache_token_to_piece.at(token);
|
llama_grammar_accept_str(grammar, piece);
|
||||||
|
}
|
||||||
|
|
||||||
|
void llama_grammar_accept_str(struct llama_grammar & grammar, const std::string & piece) {
|
||||||
// Note terminating 0 in decoded string
|
// Note terminating 0 in decoded string
|
||||||
const auto decoded = decode_utf8(piece, grammar.partial_utf8);
|
const auto decoded = decode_utf8(piece, grammar.partial_utf8);
|
||||||
const auto & code_points = decoded.first;
|
const auto & code_points = decoded.first;
|
||||||
|
|
||||||
llama_grammar_stacks stacks_new;
|
|
||||||
|
|
||||||
for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
|
for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
|
||||||
llama_grammar_accept(grammar.rules, grammar.stacks, *it, stacks_new);
|
llama_grammar_accept(&grammar, *it);
|
||||||
grammar.stacks = std::move(stacks_new);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
grammar.partial_utf8 = decoded.second;
|
grammar.partial_utf8 = decoded.second;
|
||||||
GGML_ASSERT(!grammar.stacks.empty());
|
if (grammar.stacks.empty()) {
|
||||||
|
throw std::runtime_error("Unexpected empty grammar stack after accepting piece: " + piece);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,8 +1,10 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include "llama-impl.h"
|
#include "llama.h"
|
||||||
|
|
||||||
#include <map>
|
#include <map>
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
struct llama_vocab;
|
struct llama_vocab;
|
||||||
|
|
||||||
@ -58,6 +60,7 @@ using llama_grammar_rules = std::vector<llama_grammar_rule>;
|
|||||||
using llama_grammar_stacks = std::vector<llama_grammar_stack>;
|
using llama_grammar_stacks = std::vector<llama_grammar_stack>;
|
||||||
using llama_grammar_candidates = std::vector<llama_grammar_candidate>;
|
using llama_grammar_candidates = std::vector<llama_grammar_candidate>;
|
||||||
|
|
||||||
|
// TODO: remove, needed for tests atm
|
||||||
const llama_grammar_rules & llama_grammar_get_rules (const struct llama_grammar * grammar);
|
const llama_grammar_rules & llama_grammar_get_rules (const struct llama_grammar * grammar);
|
||||||
llama_grammar_stacks & llama_grammar_get_stacks( struct llama_grammar * grammar);
|
llama_grammar_stacks & llama_grammar_get_stacks( struct llama_grammar * grammar);
|
||||||
|
|
||||||
@ -65,11 +68,7 @@ const llama_grammar_rules & llama_grammar_get_rules (const struct llama_grammar
|
|||||||
// be positioned at a character range (see `llama_grammar_advance_stack`), and
|
// be positioned at a character range (see `llama_grammar_advance_stack`), and
|
||||||
// produces the N possible stacks if the given char is accepted at those
|
// produces the N possible stacks if the given char is accepted at those
|
||||||
// positions
|
// positions
|
||||||
void llama_grammar_accept(
|
void llama_grammar_accept(struct llama_grammar * grammar, uint32_t chr);
|
||||||
const llama_grammar_rules & rules,
|
|
||||||
const llama_grammar_stacks & stacks,
|
|
||||||
uint32_t chr,
|
|
||||||
llama_grammar_stacks & stacks_new);
|
|
||||||
|
|
||||||
std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack(
|
std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack(
|
||||||
const llama_grammar_rules & rules,
|
const llama_grammar_rules & rules,
|
||||||
@ -115,6 +114,15 @@ struct llama_grammar {
|
|||||||
|
|
||||||
// buffer for partially generated UTF-8 sequence from accepted tokens
|
// buffer for partially generated UTF-8 sequence from accepted tokens
|
||||||
llama_partial_utf8 partial_utf8;
|
llama_partial_utf8 partial_utf8;
|
||||||
|
|
||||||
|
// lazy grammars wait for trigger words or tokens before constraining the sampling.
|
||||||
|
// we still ahve trigger_tokens for non-lazy grammars to force printing of special trigger tokens.
|
||||||
|
// (useful e.g. for tool_choice=required)
|
||||||
|
bool lazy = false;
|
||||||
|
bool awaiting_trigger = false; // Initialized to true for lazy grammars only
|
||||||
|
std::string trigger_buffer; // Output buffered by lazy grammar. Will be cleared once trigger is found.
|
||||||
|
std::vector<llama_token> trigger_tokens; // Tokens that trigger a lazy grammar, or tokens to force printing of (even if special).
|
||||||
|
std::vector<std::string> trigger_words;
|
||||||
};
|
};
|
||||||
|
|
||||||
//
|
//
|
||||||
@ -128,7 +136,15 @@ struct llama_grammar * llama_grammar_init_impl(
|
|||||||
size_t n_rules,
|
size_t n_rules,
|
||||||
size_t start_rule_index);
|
size_t start_rule_index);
|
||||||
|
|
||||||
struct llama_grammar * llama_grammar_init_impl(const struct llama_vocab * vocab, const char * grammar_str, const char * grammar_root);
|
struct llama_grammar * llama_grammar_init_impl(
|
||||||
|
const struct llama_vocab * vocab,
|
||||||
|
const char * grammar_str,
|
||||||
|
const char * grammar_root,
|
||||||
|
bool lazy,
|
||||||
|
const char ** trigger_words,
|
||||||
|
size_t num_trigger_words,
|
||||||
|
const llama_token * trigger_tokens,
|
||||||
|
size_t num_trigger_tokens);
|
||||||
|
|
||||||
void llama_grammar_free_impl(struct llama_grammar * grammar);
|
void llama_grammar_free_impl(struct llama_grammar * grammar);
|
||||||
|
|
||||||
@ -142,3 +158,7 @@ void llama_grammar_apply_impl(
|
|||||||
void llama_grammar_accept_impl(
|
void llama_grammar_accept_impl(
|
||||||
struct llama_grammar & grammar,
|
struct llama_grammar & grammar,
|
||||||
llama_token token);
|
llama_token token);
|
||||||
|
|
||||||
|
void llama_grammar_accept_str(
|
||||||
|
struct llama_grammar & grammar,
|
||||||
|
const std::string & piece);
|
||||||
|
71
examples/talk-llama/llama-hparams.cpp
Normal file
71
examples/talk-llama/llama-hparams.cpp
Normal file
@ -0,0 +1,71 @@
|
|||||||
|
#include "llama-hparams.h"
|
||||||
|
|
||||||
|
#include "ggml.h"
|
||||||
|
|
||||||
|
uint32_t llama_hparams::n_head(uint32_t il) const {
|
||||||
|
if (il < n_layer) {
|
||||||
|
return n_head_arr[il];
|
||||||
|
}
|
||||||
|
|
||||||
|
GGML_ABORT("fatal error");
|
||||||
|
}
|
||||||
|
|
||||||
|
uint32_t llama_hparams::n_head_kv(uint32_t il) const {
|
||||||
|
if (il < n_layer) {
|
||||||
|
return n_head_kv_arr[il];
|
||||||
|
}
|
||||||
|
|
||||||
|
GGML_ABORT("fatal error");
|
||||||
|
}
|
||||||
|
|
||||||
|
uint32_t llama_hparams::n_ff(uint32_t il) const {
|
||||||
|
if (il < n_layer) {
|
||||||
|
return n_ff_arr[il];
|
||||||
|
}
|
||||||
|
|
||||||
|
GGML_ABORT("fatal error");
|
||||||
|
}
|
||||||
|
|
||||||
|
uint32_t llama_hparams::n_gqa(uint32_t il) const {
|
||||||
|
const uint32_t n_head = this->n_head(il);
|
||||||
|
const uint32_t n_head_kv = this->n_head_kv(il);
|
||||||
|
|
||||||
|
if (n_head_kv == 0) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
return n_head/n_head_kv;
|
||||||
|
}
|
||||||
|
|
||||||
|
uint32_t llama_hparams::n_embd_k_gqa(uint32_t il) const {
|
||||||
|
const uint32_t n_head_kv = this->n_head_kv(il);
|
||||||
|
|
||||||
|
return n_embd_head_k * n_head_kv;
|
||||||
|
}
|
||||||
|
|
||||||
|
uint32_t llama_hparams::n_embd_v_gqa(uint32_t il) const {
|
||||||
|
const uint32_t n_head_kv = this->n_head_kv(il);
|
||||||
|
|
||||||
|
return n_embd_head_v * n_head_kv;
|
||||||
|
}
|
||||||
|
|
||||||
|
uint32_t llama_hparams::n_embd_k_s() const {
|
||||||
|
if (wkv_head_size != 0) {
|
||||||
|
// for RWKV models
|
||||||
|
return token_shift_count * n_embd;
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: maybe support other convolution strides than 1
|
||||||
|
// NOTE: since the first column of the conv_state is shifted out each time, it's not actually needed
|
||||||
|
return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * ssm_d_inner;
|
||||||
|
}
|
||||||
|
|
||||||
|
uint32_t llama_hparams::n_embd_v_s() const {
|
||||||
|
if (wkv_head_size != 0) {
|
||||||
|
// corresponds to RWKV's wkv_states size
|
||||||
|
return n_embd * wkv_head_size;
|
||||||
|
}
|
||||||
|
|
||||||
|
// corresponds to Mamba's ssm_states size
|
||||||
|
return ssm_d_state * ssm_d_inner;
|
||||||
|
}
|
139
examples/talk-llama/llama-hparams.h
Normal file
139
examples/talk-llama/llama-hparams.h
Normal file
@ -0,0 +1,139 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "llama.h"
|
||||||
|
|
||||||
|
#include <array>
|
||||||
|
|
||||||
|
// bump if necessary
|
||||||
|
#define LLAMA_MAX_LAYERS 512
|
||||||
|
#define LLAMA_MAX_EXPERTS 256 // DeepSeekV3
|
||||||
|
|
||||||
|
enum llama_expert_gating_func_type {
|
||||||
|
LLAMA_EXPERT_GATING_FUNC_TYPE_NONE = 0,
|
||||||
|
LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX = 1,
|
||||||
|
LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID = 2,
|
||||||
|
};
|
||||||
|
|
||||||
|
struct llama_hparams_posnet {
|
||||||
|
uint32_t n_embd;
|
||||||
|
uint32_t n_layer;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct llama_hparams_convnext {
|
||||||
|
uint32_t n_embd;
|
||||||
|
uint32_t n_layer;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct llama_hparams {
|
||||||
|
bool vocab_only;
|
||||||
|
bool rope_finetuned;
|
||||||
|
bool use_par_res;
|
||||||
|
bool swin_norm;
|
||||||
|
|
||||||
|
uint32_t n_ctx_train; // context size the model was trained on
|
||||||
|
uint32_t n_embd;
|
||||||
|
uint32_t n_embd_features = 0;
|
||||||
|
uint32_t n_layer;
|
||||||
|
uint32_t n_rot;
|
||||||
|
uint32_t n_swa = 0; // sliding window attention (SWA)
|
||||||
|
uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads
|
||||||
|
uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head
|
||||||
|
uint32_t n_expert = 0;
|
||||||
|
uint32_t n_expert_used = 0;
|
||||||
|
uint32_t n_rel_attn_bkts = 0;
|
||||||
|
|
||||||
|
// for WavTokenizer
|
||||||
|
struct llama_hparams_posnet posnet;
|
||||||
|
struct llama_hparams_convnext convnext;
|
||||||
|
|
||||||
|
std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_arr;
|
||||||
|
std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_kv_arr;
|
||||||
|
std::array<uint32_t, LLAMA_MAX_LAYERS> n_ff_arr;
|
||||||
|
|
||||||
|
uint32_t n_layer_dense_lead = 0;
|
||||||
|
uint32_t n_lora_q = 0;
|
||||||
|
uint32_t n_lora_kv = 0;
|
||||||
|
uint32_t n_ff_exp = 0;
|
||||||
|
uint32_t n_ff_shexp = 0;
|
||||||
|
uint32_t n_expert_shared = 0;
|
||||||
|
uint32_t n_norm_groups = 0;
|
||||||
|
|
||||||
|
float expert_weights_scale = 0.0;
|
||||||
|
bool expert_weights_norm = false;
|
||||||
|
uint32_t expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_NONE;
|
||||||
|
|
||||||
|
float f_norm_eps;
|
||||||
|
float f_norm_rms_eps;
|
||||||
|
float f_norm_group_eps;
|
||||||
|
|
||||||
|
float f_attn_logit_softcapping = 50.0f;
|
||||||
|
float f_final_logit_softcapping = 30.0f;
|
||||||
|
|
||||||
|
// for RWKV
|
||||||
|
uint32_t rescale_every_n_layers = 0;
|
||||||
|
uint32_t time_mix_extra_dim = 0;
|
||||||
|
uint32_t time_decay_extra_dim = 0;
|
||||||
|
uint32_t wkv_head_size = 0;
|
||||||
|
uint32_t token_shift_count = 2;
|
||||||
|
|
||||||
|
float rope_attn_factor = 1.0f;
|
||||||
|
float rope_freq_base_train;
|
||||||
|
float rope_freq_scale_train;
|
||||||
|
uint32_t n_ctx_orig_yarn;
|
||||||
|
float rope_yarn_log_mul;
|
||||||
|
|
||||||
|
std::array<int, 4> rope_sections;
|
||||||
|
|
||||||
|
// for State Space Models
|
||||||
|
uint32_t ssm_d_conv = 0;
|
||||||
|
uint32_t ssm_d_inner = 0;
|
||||||
|
uint32_t ssm_d_state = 0;
|
||||||
|
uint32_t ssm_dt_rank = 0;
|
||||||
|
|
||||||
|
bool ssm_dt_b_c_rms = false;
|
||||||
|
|
||||||
|
float f_clamp_kqv = 0.0f;
|
||||||
|
float f_max_alibi_bias = 0.0f;
|
||||||
|
float f_logit_scale = 0.0f;
|
||||||
|
|
||||||
|
// Additional scale factors (Granite/Granite MoE)
|
||||||
|
float f_residual_scale = 0.0f;
|
||||||
|
float f_embedding_scale = 0.0f;
|
||||||
|
float f_attention_scale = 0.0f;
|
||||||
|
|
||||||
|
bool causal_attn = true;
|
||||||
|
bool use_alibi = false;
|
||||||
|
bool attn_soft_cap = false;
|
||||||
|
|
||||||
|
// needed by encoder-decoder models (e.g. T5, FLAN-T5)
|
||||||
|
// ref: https://github.com/ggerganov/llama.cpp/pull/8141
|
||||||
|
llama_token dec_start_token_id = LLAMA_TOKEN_NULL;
|
||||||
|
|
||||||
|
enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_NONE;
|
||||||
|
enum llama_rope_type rope_type = LLAMA_ROPE_TYPE_NONE;
|
||||||
|
enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE;
|
||||||
|
|
||||||
|
uint32_t n_head(uint32_t il = 0) const;
|
||||||
|
|
||||||
|
uint32_t n_head_kv(uint32_t il = 0) const;
|
||||||
|
|
||||||
|
uint32_t n_ff(uint32_t il = 0) const;
|
||||||
|
|
||||||
|
uint32_t n_gqa(uint32_t il = 0) const;
|
||||||
|
|
||||||
|
// dimension of key embeddings across all k-v heads
|
||||||
|
uint32_t n_embd_k_gqa(uint32_t il = 0) const;
|
||||||
|
|
||||||
|
// dimension of value embeddings across all k-v heads
|
||||||
|
uint32_t n_embd_v_gqa(uint32_t il = 0) const;
|
||||||
|
|
||||||
|
// dimension of the rolling state embeddings
|
||||||
|
// corresponds to Mamba's conv_states size or RWKV's token_shift states size
|
||||||
|
uint32_t n_embd_k_s() const;
|
||||||
|
|
||||||
|
// dimension of the recurrent state embeddings
|
||||||
|
uint32_t n_embd_v_s() const;
|
||||||
|
};
|
||||||
|
|
||||||
|
static_assert(std::is_trivially_copyable<llama_hparams>::value, "llama_hparams must be trivially copyable");
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user