mirror of
https://github.com/ggerganov/whisper.cpp.git
synced 2025-07-02 07:20:13 +02:00
Compare commits
728 Commits
gg/cuda-no
...
v1.7.4
Author | SHA1 | Date | |
---|---|---|---|
8a9ad7844d | |||
eb874b3a3c | |||
eb78e3a3f1 | |||
ece3ff88f6 | |||
9366544991 | |||
95583942ed | |||
2e93cb6a2f | |||
de5cd60d1c | |||
3fcba3e58b | |||
cea5f1c52f | |||
2112462db4 | |||
fc84ecd445 | |||
8de1e99907 | |||
499af9294a | |||
bcf937c216 | |||
b8d90953d7 | |||
60a422147b | |||
3387415bad | |||
536ca3ec89 | |||
a4bb983190 | |||
39c205f555 | |||
6d502f33dc | |||
5ea27d089d | |||
1462d92588 | |||
7ba1a41f47 | |||
5ea088636f | |||
f32ddb3b1c | |||
79b75ece03 | |||
6348d73e55 | |||
fb36a1538a | |||
c81b8b910b | |||
85b60f31d0 | |||
227b5ffa36 | |||
36a64a253f | |||
c84b83c370 | |||
5136fd92c2 | |||
7d55637f0b | |||
0994506054 | |||
53c9a3a984 | |||
ed09075ca0 | |||
f07a81aa9f | |||
4183517076 | |||
f4668169a0 | |||
944ce49439 | |||
2e59dced12 | |||
e4e05981d6 | |||
3de9deead5 | |||
47f989f9b3 | |||
acc4e13dee | |||
ba6c2a8fd9 | |||
6576af00d7 | |||
8ac5db0169 | |||
61edb117a0 | |||
eb97b257eb | |||
479499dc0e | |||
d420a759c5 | |||
a1ab9b5e91 | |||
e22d38e4f2 | |||
856fbaa92f | |||
2c05efa4b1 | |||
c21fb10b28 | |||
26c9fd0cdc | |||
e6eed605cf | |||
abe3102cb7 | |||
1193e494a9 | |||
e5e951672e | |||
0e24559ad9 | |||
527ac800cf | |||
479bd77169 | |||
d8bf63a41b | |||
b82c8d76dc | |||
86346f811e | |||
c635f40a34 | |||
e0be0de1ee | |||
60dc6d003f | |||
eb27e0d834 | |||
a682fdce0c | |||
9ffbd3d969 | |||
6585a890b4 | |||
d0a050b51f | |||
e990d1b791 | |||
4a6d52efe6 | |||
8b841d430a | |||
b74b68212a | |||
3a27b2b91b | |||
d34445e960 | |||
f897eb7670 | |||
2f2841bfce | |||
09a1b61218 | |||
94e7da1ff2 | |||
c4aed6831e | |||
199579652e | |||
d17e7139d8 | |||
6a52eaea74 | |||
6aa1d7b892 | |||
262e865a70 | |||
ed733e85a1 | |||
5980b1ae77 | |||
0415a66044 | |||
7d134e3737 | |||
9df53b357e | |||
b2115b4d9b | |||
0164427dd5 | |||
627b11c78a | |||
472464453d | |||
11dddfbc9e | |||
384e214cc7 | |||
f2c680f893 | |||
fbe66da0e5 | |||
a815940e0e | |||
904e307bce | |||
491ec076b4 | |||
966433fdf2 | |||
6f1ba9d82d | |||
015ecd0001 | |||
b7c64a4352 | |||
7895d39508 | |||
22616f00f9 | |||
02c6fcbc2c | |||
3daeacad24 | |||
4d73962da4 | |||
068812650e | |||
4b7e059e15 | |||
30e35d7271 | |||
3623bd58f2 | |||
cb847c20a7 | |||
964b154a2a | |||
d7c2a04bce | |||
2bb4ca9cba | |||
a753a82462 | |||
276b08d8f0 | |||
4ca1e72fe0 | |||
16a66f103f | |||
330273901f | |||
42099a9342 | |||
90dd5fca9c | |||
2490f2a7f8 | |||
230e985633 | |||
ae24083f23 | |||
6463e36369 | |||
b3301f7d82 | |||
ab5d4d93ec | |||
2d6e9dd723 | |||
2f16e51553 | |||
0f0994902f | |||
5e1fcc1780 | |||
48f421de23 | |||
e7afb2b991 | |||
9a5ef7b169 | |||
453cc0fcf1 | |||
78dfec6bc5 | |||
f6d518fc4c | |||
ac33379a35 | |||
77e3e4a090 | |||
b840bb09be | |||
8b1c1c30a7 | |||
4b81335f75 | |||
2a4b5c9d7e | |||
04662748aa | |||
a117279e13 | |||
bbb292ed38 | |||
95e8901e71 | |||
4af9626702 | |||
c52d1035de | |||
5773a14980 | |||
6939147c47 | |||
98f9916c9f | |||
021eef1000 | |||
a9d06ce151 | |||
8c6a9b8bb6 | |||
37c88027e1 | |||
9db070a3c5 | |||
7fd8d9c220 | |||
06e059b8f8 | |||
c9f49d5f9d | |||
f4c1d7df39 | |||
339b8e559c | |||
5f6d6919b4 | |||
8ee767732f | |||
45f1f9144f | |||
53589c8f12 | |||
7ac2f17fac | |||
48862c7b27 | |||
44f7d9f4e3 | |||
fd12302587 | |||
f80bef4630 | |||
161b443514 | |||
ef7fbe1c66 | |||
0879d3599e | |||
2a444dc5bd | |||
45cf1634dc | |||
dcb2922d1d | |||
3c5c751174 | |||
24ad19d0e9 | |||
bd574b05af | |||
7e0eafcb1e | |||
75670ae673 | |||
d4fcdf602b | |||
1bebb1a116 | |||
ee437cde59 | |||
c1506d38cf | |||
c9541741e6 | |||
6a55015dc4 | |||
7e86030d4d | |||
401fbea326 | |||
44d1cbdfe9 | |||
3216efef2e | |||
2c0484ebf7 | |||
3298916e5e | |||
746bf2596f | |||
5f7e094ccb | |||
6266a9f9e5 | |||
d24f981fb2 | |||
01d3bd7d5c | |||
bb12cd9b77 | |||
f02b40bcb4 | |||
83ac2842bd | |||
c4e95fb74d | |||
e23721f3fb | |||
c0a9f8ef85 | |||
6477b84eb6 | |||
24d706774d | |||
5089ab2d6a | |||
bdbb906817 | |||
fa2ebd336e | |||
21b01a21b6 | |||
b54ce5edc5 | |||
26a31b78e9 | |||
14d13c5f9f | |||
5e110c2eb5 | |||
4a9926d521 | |||
ae3c5642d0 | |||
e287a3b627 | |||
b890243690 | |||
b7b38f7d68 | |||
9f67aab211 | |||
8f0f785d88 | |||
d0b8335789 | |||
1550be79f1 | |||
807f848c2f | |||
42398f13b0 | |||
31c3482a4e | |||
50257af686 | |||
d111a0987e | |||
915bcd2c63 | |||
f69c8b6f1b | |||
8c9044bef0 | |||
5f8e928194 | |||
25da30bd60 | |||
542734100e | |||
b06b4c0c08 | |||
939d36fb4c | |||
1471e41180 | |||
35949192e9 | |||
9c817edb48 | |||
24a0feb5d9 | |||
2ab8cce7e3 | |||
b40c255e98 | |||
ec3e16445e | |||
0665168ef3 | |||
5f6b992eea | |||
3e231ab9cc | |||
371bfaca8c | |||
91e30a3a23 | |||
1e122d66f9 | |||
63a4e09a0f | |||
75dd198870 | |||
1d48457aa6 | |||
307712a903 | |||
fbc9a05ddf | |||
28496ac55e | |||
b1c06c09b0 | |||
498ac0dc27 | |||
03af461de8 | |||
f19463ece2 | |||
5f8a086e22 | |||
a28d82e373 | |||
5ccca19f0c | |||
300c07b94d | |||
31aea563a8 | |||
0377596b77 | |||
c65d0fd3c8 | |||
d9efb664ac | |||
b5b4b0f5de | |||
ab36d02560 | |||
6e67749c00 | |||
ab0385f43b | |||
10eb603a3c | |||
a3231b2f2e | |||
13db492f83 | |||
741c138aa1 | |||
25f9fee6fb | |||
7c1570bee6 | |||
4078e4c388 | |||
a4a22daa8f | |||
e1936eb2a5 | |||
28b044dad9 | |||
b8f11a0a17 | |||
ff5a838099 | |||
84713613be | |||
ded89c9d08 | |||
042e95d92f | |||
81110c0174 | |||
c313723860 | |||
e69b2371e2 | |||
1531259b2c | |||
44bc2767fd | |||
bd7ace7adc | |||
315364d7de | |||
80753d4da8 | |||
8f9bdca4c4 | |||
4e10afb5a9 | |||
aa037a60f3 | |||
19dca2bb14 | |||
55e422109b | |||
3f020fac9d | |||
1626b73b03 | |||
850f7b19d3 | |||
d4bc413505 | |||
fc49ee4479 | |||
c0ea41f6b2 | |||
0fbaac9c89 | |||
a5abfe6a90 | |||
d3f7137cc9 | |||
f7c99e49b3 | |||
1d5752fa42 | |||
b6049060dd | |||
06a1da9daf | |||
746d173592 | |||
fdbfb460ed | |||
ebca09a3d1 | |||
9f346d0084 | |||
6a94163b91 | |||
8a35b58c4f | |||
1789abca84 | |||
847f94fdeb | |||
6e40108a59 | |||
1ba185f4af | |||
396089f3cf | |||
941912467d | |||
0b1b094a67 | |||
40e52a76b9 | |||
cf977670e6 | |||
df2c364de7 | |||
1acfadb721 | |||
ea642144d2 | |||
282a8654c4 | |||
936cf3beb7 | |||
bc92c2f8f0 | |||
f7d55e0614 | |||
f62a546e03 | |||
2944cb72d9 | |||
ccc2547210 | |||
162a455402 | |||
ff2cb0811f | |||
5e9d6baa48 | |||
845f8d663e | |||
31fdf05fda | |||
0ac6666cd2 | |||
6c91da80b8 | |||
c245168ba3 | |||
280fee8fa0 | |||
78b4c1c25f | |||
1edea2eb4b | |||
96808786b7 | |||
bb57ecb85e | |||
abdb73c7cc | |||
391e548a43 | |||
2a29afd4c6 | |||
5963004ff9 | |||
ede1718f6d | |||
2ef717b293 | |||
8feb375fbd | |||
69339af2d1 | |||
0d2e2aed80 | |||
451e9ee92c | |||
1133ac98a8 | |||
76d27eec9a | |||
fe18c29ab8 | |||
234f9bd320 | |||
3b183cfae7 | |||
02285dff81 | |||
2fc1d20f9e | |||
08e8414f27 | |||
05c6139625 | |||
896c41ef30 | |||
c36ddc43c6 | |||
13f41af43e | |||
3fc5306b82 | |||
adf2474b10 | |||
008816a257 | |||
33e5a6612e | |||
f0a7d65b3d | |||
54e5095765 | |||
34291099fb | |||
d245d7aec7 | |||
d661283e68 | |||
c0761c95f5 | |||
138e20b697 | |||
a8d9abfa22 | |||
195afd6dc1 | |||
1fd78999e8 | |||
374e9e0c5e | |||
a2cb5b4183 | |||
288ae5176e | |||
d868122a5a | |||
2ba25fb122 | |||
4f4687cb74 | |||
66b00fad0d | |||
c6cc8d16c3 | |||
3f8f8a78a2 | |||
3e47686919 | |||
a53b69a003 | |||
d1c9b47360 | |||
32f659861a | |||
a785232bf9 | |||
0677293503 | |||
1fbdb813c0 | |||
67725ac8f3 | |||
dac89af357 | |||
26225f1fb0 | |||
3468983315 | |||
c7515b0995 | |||
253ce30004 | |||
03a6fae484 | |||
d37fd275fd | |||
195877fd72 | |||
9e715e1b96 | |||
6f5514b6e2 | |||
709a22b92d | |||
01e214a1d7 | |||
1cecfe6a02 | |||
3764bc974c | |||
fcffc912a9 | |||
38d40b9972 | |||
09149ee0ae | |||
6b7f37dd5c | |||
791812fb54 | |||
5d6dc19f04 | |||
34972dbe22 | |||
bea43e0c64 | |||
3853d83d73 | |||
5b1ce40fa8 | |||
049b3a0e53 | |||
a551933542 | |||
5caa19240d | |||
5236f02784 | |||
2abaf19e0d | |||
6eb7a0ffbd | |||
e8f0f9b5f0 | |||
d8e24b877d | |||
cc68f31577 | |||
4a4a52bf98 | |||
c96906d84d | |||
9600fc3eb1 | |||
e2e55a6fed | |||
c4e1861d2c | |||
da9809f243 | |||
9d754a56cf | |||
8cc90a0e80 | |||
82b5c56f63 | |||
b2ad484c89 | |||
d96a17848f | |||
0e7798677a | |||
58a36d2e3b | |||
24d8534bd8 | |||
9b16ddd3a5 | |||
32f88af17b | |||
9bf7250bf9 | |||
17e49d3ab2 | |||
58b725282a | |||
7e59afa1e0 | |||
5ac022140e | |||
0eaa67280c | |||
5a62fdb735 | |||
60098d6204 | |||
317293e6a7 | |||
488a966c07 | |||
8954769aa2 | |||
df06468d9e | |||
1fbd828a5d | |||
d2986f8b07 | |||
8bfa8574e2 | |||
376567bf4f | |||
c0fd64a9c0 | |||
6e9596f6de | |||
9e3c5345cd | |||
b6c05ce82f | |||
52c80cac00 | |||
3643120690 | |||
d65786ea54 | |||
7f78675008 | |||
22fcd5fd11 | |||
993f0df419 | |||
9b1788483c | |||
ad37d26983 | |||
81c999fe0a | |||
4b7de08bfd | |||
4b9c4de1ad | |||
be88ee1d75 | |||
3ab19c744e | |||
6eac06759b | |||
2e9a5bd2c4 | |||
58323bf8ed | |||
22058f2dbc | |||
5b7979a1e6 | |||
ee14c02365 | |||
ab39dd34e1 | |||
b1348d3530 | |||
90641b5cf4 | |||
4160b930f1 | |||
7a96e661e4 | |||
a902fb4ab2 | |||
6cb38c3673 | |||
9cf14ebcbc | |||
8e39ee171f | |||
d26250f78c | |||
5218ea21b8 | |||
e60be821ce | |||
19708df884 | |||
3f190addda | |||
b355ee7cfa | |||
49ac8872b4 | |||
8ef98ae7e3 | |||
e471adcfa5 | |||
aa816c922c | |||
b3264eb266 | |||
eb2eb87a58 | |||
83fcb0e486 | |||
f7bb412878 | |||
ef6dcf0d0c | |||
c7ea4fd235 | |||
525f190917 | |||
dd916a2852 | |||
0620fe00ec | |||
31d0a9a14f | |||
c06970dd72 | |||
7598acf525 | |||
43ddfce969 | |||
a7e6d2cd9c | |||
86506b0c5c | |||
11182fae34 | |||
0bc8bffe1d | |||
8c4f30497a | |||
b1ee3a8444 | |||
be9a16fd3f | |||
f4d9a95b0f | |||
a8ab3abe09 | |||
fb6a835938 | |||
8923bb4292 | |||
fcba6aa352 | |||
8807fe608b | |||
3e94c7a81d | |||
77af3254e1 | |||
d4b3cffec4 | |||
b852a4c5ca | |||
2157abaab4 | |||
68d609a12c | |||
5a8ae474f0 | |||
84493d7f3e | |||
15d71189e9 | |||
37e962580f | |||
db0ea7a2f2 | |||
5498b0e6c0 | |||
2af4a52c39 | |||
eee2fe882e | |||
0d1a11e5e2 | |||
b2ead7d6f4 | |||
8da6fd4dff | |||
ab8ec9e940 | |||
701265bf38 | |||
fe36c90971 | |||
6739eb83c3 | |||
f68298ce06 | |||
7ae885c1ef | |||
d207c68822 | |||
16d72504fe | |||
1c31f9d4a8 | |||
8ecb2f1f68 | |||
5226c3d45c | |||
dbf9c15e30 | |||
d3f6c34976 | |||
425e2910a3 | |||
49868aa851 | |||
ff08e30ab5 | |||
95f2a191c0 | |||
00422ec3cf | |||
c5b05321e9 | |||
5dc636a65a | |||
73703a144f | |||
e89fdceec2 | |||
29a2739d27 | |||
ee6d17f6b4 | |||
95e90823d9 | |||
005cc45df3 | |||
c2c60dc9ba | |||
4af3194b7c | |||
4a2ba1a065 | |||
f096cc6807 | |||
e4bc83ab47 | |||
db7e0dbe6e | |||
bf88c94da9 | |||
3eea171cab | |||
64a56ebf13 | |||
bec9836849 | |||
c118733a29 | |||
bb3dd45524 | |||
04e7fa6f4f | |||
9f7f36d4c9 | |||
4a62efbb95 | |||
0a55a70b9b | |||
dc8cc2dd6f | |||
3efedb9511 | |||
e30c679928 | |||
bf4cb4abad | |||
e293f17d34 | |||
5d950c4b8d | |||
820446e230 | |||
54d5823ebe | |||
5181494e9f | |||
4a6e6e8b30 | |||
de29b193f6 | |||
922971041b | |||
63a767a134 | |||
30841fa786 | |||
3b1ac03828 | |||
990de617b5 | |||
6975600b4b | |||
061eeb9f61 | |||
4942b1b428 | |||
3c7cc5c437 | |||
5cd42ee2cc | |||
ee718f3da6 | |||
63eac1f608 | |||
b17ba2815b | |||
7a489af2f3 | |||
4a4ea13d6d | |||
174a461fc6 | |||
d8b7a24bc9 | |||
acf3832c9c | |||
d29ac44303 | |||
12638dfef0 | |||
f100b3b523 | |||
a99e213a82 | |||
7483d2b61c | |||
1fe5948227 | |||
760497e1ab | |||
b172e7714c | |||
dc01aadb18 | |||
e08c62149b | |||
abab4500fa | |||
e666315fa8 | |||
3f869af14c | |||
cbacb7634c | |||
6cc3b022ee | |||
e5e38d4920 | |||
2a6bab5655 | |||
8c01c9b85c | |||
d1123d795e | |||
9b3d784020 | |||
a16137d13d | |||
5582039d0a | |||
9a16c643e2 | |||
10a8a23100 | |||
29cfeef77f | |||
e66e9ea25b | |||
276779a849 | |||
1f35ce61c1 | |||
4b19cc3ed4 | |||
a535d348dd | |||
8f5dc729d9 | |||
02fc147a0b | |||
109148ac84 | |||
3563473d2c | |||
046834198d | |||
0a2ad9de06 | |||
39b0640b09 | |||
8dca71de64 | |||
812787cbc5 | |||
68ef10805e | |||
96fdb90f5f | |||
e98f9ac554 | |||
02d481595b | |||
7091c7ab5a | |||
d70ccb75f5 | |||
5ee048eb67 | |||
37ed71c964 | |||
8cd7a3df37 | |||
04a3279320 | |||
45ddda8e0c | |||
c41317fd66 | |||
96b8419b27 | |||
3c63f4cf35 | |||
5848dfd9c8 | |||
29ab5d0326 | |||
c4d6958b3e | |||
c9dcb75118 | |||
bbdbc3fc62 | |||
28c207a541 | |||
c23f830983 | |||
caeeb32b41 | |||
584cc1177a | |||
cc1ae10989 | |||
eb26f55b40 | |||
eb2b086584 | |||
67919cfe11 | |||
bf5fc81a8a | |||
2b07dc3186 | |||
951c463d39 | |||
7f257b210f | |||
705fe30a02 | |||
45b5b95e29 | |||
f2c47d1e6a | |||
b4bb9b9036 | |||
2bc6483299 | |||
ec52f900e4 | |||
77d708fabb | |||
c00149c861 | |||
574661f2e6 | |||
7bd69349bf | |||
488ad99c13 | |||
7178cceeaa | |||
8d55ccdb8c | |||
37a72cb170 | |||
bf9b69284f | |||
c4de1e19df | |||
5b7073cae1 | |||
b29b3b2924 |
@ -12,7 +12,7 @@ FROM ${BASE_CUDA_DEV_CONTAINER} as build
|
|||||||
ARG CUDA_DOCKER_ARCH=all
|
ARG CUDA_DOCKER_ARCH=all
|
||||||
|
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
apt-get install -y build-essential git cmake
|
apt-get install -y build-essential git cmake libsdl2-dev wget
|
||||||
|
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
@ -21,8 +21,8 @@ COPY . .
|
|||||||
# Set nvcc architecture
|
# Set nvcc architecture
|
||||||
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
|
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
|
||||||
# Enable cuBLAS
|
# Enable cuBLAS
|
||||||
ENV WHISPER_CUBLAS=1
|
ENV GGML_CUDA=1
|
||||||
|
|
||||||
RUN make
|
RUN make base.en
|
||||||
|
|
||||||
ENTRYPOINT ["/app/main"]
|
ENTRYPOINT ["/app/main"]
|
||||||
|
@ -14,10 +14,10 @@ ARG CUDA_DOCKER_ARCH=all
|
|||||||
# Set nvcc architecture
|
# Set nvcc architecture
|
||||||
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
|
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
|
||||||
# Enable cuBLAS
|
# Enable cuBLAS
|
||||||
ENV WHISPER_CUBLAS=1
|
ENV GGML_CUDA=1
|
||||||
|
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
apt-get install -y build-essential \
|
apt-get install -y build-essential libsdl2-dev wget cmake \
|
||||||
&& rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
|
&& rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
|
||||||
|
|
||||||
# Ref: https://stackoverflow.com/a/53464012
|
# Ref: https://stackoverflow.com/a/53464012
|
||||||
@ -25,7 +25,7 @@ ENV CUDA_MAIN_VERSION=12.3
|
|||||||
ENV LD_LIBRARY_PATH /usr/local/cuda-${CUDA_MAIN_VERSION}/compat:$LD_LIBRARY_PATH
|
ENV LD_LIBRARY_PATH /usr/local/cuda-${CUDA_MAIN_VERSION}/compat:$LD_LIBRARY_PATH
|
||||||
|
|
||||||
COPY .. .
|
COPY .. .
|
||||||
RUN make
|
RUN make base.en
|
||||||
|
|
||||||
FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime
|
FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime
|
||||||
ENV CUDA_MAIN_VERSION=12.3
|
ENV CUDA_MAIN_VERSION=12.3
|
||||||
@ -33,7 +33,7 @@ ENV LD_LIBRARY_PATH /usr/local/cuda-${CUDA_MAIN_VERSION}/compat:$LD_LIBRARY_PATH
|
|||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
apt-get install -y curl ffmpeg \
|
apt-get install -y curl ffmpeg wget cmake \
|
||||||
&& rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
|
&& rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
|
||||||
|
|
||||||
COPY --from=build /app /app
|
COPY --from=build /app /app
|
||||||
|
@ -2,17 +2,17 @@ FROM ubuntu:22.04 AS build
|
|||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
apt-get install -y build-essential \
|
apt-get install -y build-essential wget cmake \
|
||||||
&& rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
|
&& rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
|
||||||
|
|
||||||
COPY .. .
|
COPY .. .
|
||||||
RUN make
|
RUN make base.en
|
||||||
|
|
||||||
FROM ubuntu:22.04 AS runtime
|
FROM ubuntu:22.04 AS runtime
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
apt-get install -y curl ffmpeg \
|
apt-get install -y curl ffmpeg libsdl2-dev wget cmake \
|
||||||
&& rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
|
&& rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
|
||||||
|
|
||||||
COPY --from=build /app /app
|
COPY --from=build /app /app
|
||||||
|
6
.github/workflows/bindings-go.yml
vendored
6
.github/workflows/bindings-go.yml
vendored
@ -13,10 +13,10 @@ jobs:
|
|||||||
ubuntu-latest:
|
ubuntu-latest:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/setup-go@v3
|
- uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
go-version: '^1.19'
|
go-version: '^1.23'
|
||||||
- uses: actions/checkout@v1
|
- uses: actions/checkout@v4
|
||||||
- run: |
|
- run: |
|
||||||
cd bindings/go
|
cd bindings/go
|
||||||
make test
|
make test
|
||||||
|
47
.github/workflows/bindings-ruby.yml
vendored
47
.github/workflows/bindings-ruby.yml
vendored
@ -3,20 +3,53 @@ on:
|
|||||||
push:
|
push:
|
||||||
paths:
|
paths:
|
||||||
- bindings/ruby/**
|
- bindings/ruby/**
|
||||||
- whisper.h
|
- src/**/*.c
|
||||||
|
- src/**/*.cpp
|
||||||
|
- src/**/*.h
|
||||||
|
- src/**/*.m
|
||||||
|
- src/**/*.metal
|
||||||
|
- include/**/*.c
|
||||||
|
- include/**/*.cpp
|
||||||
|
- include/**/*.h
|
||||||
|
- include/**/*.m
|
||||||
|
- include/**/*.metal
|
||||||
|
- ggml/**/*.c
|
||||||
|
- ggml/**/*.cpp
|
||||||
|
- ggml/**/*.h
|
||||||
|
- ggml/**/*.m
|
||||||
|
- ggml/**/*.metal
|
||||||
|
- scripts/get-flags.mk
|
||||||
|
- examples/dr_wav.h
|
||||||
pull_request:
|
pull_request:
|
||||||
paths:
|
paths:
|
||||||
- bindings/ruby/**
|
- bindings/ruby/**
|
||||||
- whisper.h
|
- src/**/*.c
|
||||||
|
- src/**/*.cpp
|
||||||
|
- src/**/*.h
|
||||||
|
- src/**/*.m
|
||||||
|
- src/**/*.metal
|
||||||
|
- include/**/*.c
|
||||||
|
- include/**/*.cpp
|
||||||
|
- include/**/*.h
|
||||||
|
- include/**/*.m
|
||||||
|
- include/**/*.metal
|
||||||
|
- ggml/**/*.c
|
||||||
|
- ggml/**/*.cpp
|
||||||
|
- ggml/**/*.h
|
||||||
|
- ggml/**/*.m
|
||||||
|
- ggml/**/*.metal
|
||||||
|
- scripts/get-flags.mk
|
||||||
|
- examples/dr_wav.h
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
ubuntu-latest:
|
ubuntu-latest:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
defaults:
|
||||||
|
run:
|
||||||
|
working-directory: bindings/ruby
|
||||||
steps:
|
steps:
|
||||||
- uses: ruby/setup-ruby@v1
|
- uses: ruby/setup-ruby@v1
|
||||||
with:
|
with:
|
||||||
ruby-version: '3.0'
|
ruby-version: '3.1'
|
||||||
- uses: actions/checkout@v1
|
- uses: actions/checkout@v4
|
||||||
- run: |
|
- run: rake test
|
||||||
cd bindings/ruby/ext
|
|
||||||
ruby extconf.rb && make
|
|
||||||
|
480
.github/workflows/build.yml
vendored
480
.github/workflows/build.yml
vendored
@ -1,8 +1,19 @@
|
|||||||
name: CI
|
name: CI
|
||||||
on: [push, pull_request]
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
pull_request:
|
||||||
|
types: [opened, synchronize, reopened]
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
env:
|
env:
|
||||||
ubuntu_image: "ubuntu:22.04"
|
ubuntu_image: "ubuntu:22.04"
|
||||||
|
VCPKG_BINARY_SOURCES: "clear;x-gha,readwrite"
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
ubuntu-latest:
|
ubuntu-latest:
|
||||||
@ -11,7 +22,7 @@ jobs:
|
|||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
arch: [linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le]
|
arch: [linux/amd64, linux/ppc64le]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Clone
|
- name: Clone
|
||||||
@ -27,9 +38,61 @@ jobs:
|
|||||||
-w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
|
-w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
|
||||||
set -e
|
set -e
|
||||||
apt update
|
apt update
|
||||||
apt install -y build-essential libsdl2-dev
|
apt install -y build-essential libsdl2-dev cmake
|
||||||
make
|
cmake -B build
|
||||||
make stream'
|
cmake --build build --config Release -j $(nproc)'
|
||||||
|
|
||||||
|
ubuntu-latest-arm64:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
arch: [linux/arm64]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Clone
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Build ${{ matrix.arch }}
|
||||||
|
run: |
|
||||||
|
docker run --platform ${{ matrix.arch }} --rm \
|
||||||
|
-v ${{ github.workspace }}:/workspace \
|
||||||
|
-w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
|
||||||
|
set -e
|
||||||
|
apt update
|
||||||
|
apt install -y build-essential libsdl2-dev cmake
|
||||||
|
cmake -B build -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv8-a
|
||||||
|
cmake --build build --config Release -j $(nproc)'
|
||||||
|
|
||||||
|
ubuntu-latest-arm-v7:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
arch: [linux/arm/v7]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Clone
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Build ${{ matrix.arch }}
|
||||||
|
run: |
|
||||||
|
docker run --platform ${{ matrix.arch }} --rm \
|
||||||
|
-v ${{ github.workspace }}:/workspace \
|
||||||
|
-w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
|
||||||
|
set -e
|
||||||
|
apt update
|
||||||
|
apt install -y build-essential libsdl2-dev cmake
|
||||||
|
cmake -B build -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv7-a+fp
|
||||||
|
cmake --build build --config Release -j $(nproc)'
|
||||||
|
|
||||||
macOS-latest:
|
macOS-latest:
|
||||||
runs-on: macOS-latest
|
runs-on: macOS-latest
|
||||||
@ -41,30 +104,30 @@ jobs:
|
|||||||
- name: Dependencies
|
- name: Dependencies
|
||||||
run: |
|
run: |
|
||||||
brew update
|
brew update
|
||||||
brew install sdl2
|
brew install sdl2 cmake
|
||||||
|
|
||||||
- name: Build
|
- name: Build
|
||||||
run: |
|
run: |
|
||||||
make
|
cmake -B build
|
||||||
make stream
|
cmake --build build --config Release
|
||||||
|
|
||||||
freeBSD-latest:
|
# freeBSD-latest:
|
||||||
runs-on: macos-12
|
# runs-on: macos-12
|
||||||
|
#
|
||||||
steps:
|
# steps:
|
||||||
- name: Clone
|
# - name: Clone
|
||||||
uses: actions/checkout@v4
|
# uses: actions/checkout@v4
|
||||||
|
#
|
||||||
- name: Build
|
# - name: Build
|
||||||
uses: cross-platform-actions/action@v0.24.0
|
# uses: cross-platform-actions/action@v0.24.0
|
||||||
with:
|
# with:
|
||||||
operating_system: freebsd
|
# operating_system: freebsd
|
||||||
version: '13.2'
|
# version: '13.3'
|
||||||
run: |
|
# run: |
|
||||||
sudo pkg update
|
# sudo pkg update
|
||||||
sudo pkg install -y gmake sdl2
|
# sudo pkg install -y gmake sdl2 cmake
|
||||||
gmake
|
# cmake -B build
|
||||||
gmake stream
|
# cmake --build build --config Release
|
||||||
|
|
||||||
ubuntu-latest-gcc:
|
ubuntu-latest-gcc:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
@ -73,7 +136,7 @@ jobs:
|
|||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
build: [Debug, Release]
|
build: [Debug, Release]
|
||||||
arch: [linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le]
|
arch: [linux/amd64, linux/ppc64le]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Clone
|
- name: Clone
|
||||||
@ -94,6 +157,62 @@ jobs:
|
|||||||
make
|
make
|
||||||
ctest -L gh --output-on-failure'
|
ctest -L gh --output-on-failure'
|
||||||
|
|
||||||
|
ubuntu-latest-gcc-arm64:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
build: [Debug, Release]
|
||||||
|
arch: [linux/arm64]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Clone
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Build ${{ matrix.arch }}
|
||||||
|
run: |
|
||||||
|
docker run --platform ${{ matrix.arch }} --rm \
|
||||||
|
-v ${{ github.workspace }}:/workspace \
|
||||||
|
-w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
|
||||||
|
set -e
|
||||||
|
apt update
|
||||||
|
apt install -y build-essential cmake libsdl2-dev
|
||||||
|
cmake . -DWHISPER_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }} -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv8-a
|
||||||
|
make
|
||||||
|
ctest -L gh --output-on-failure'
|
||||||
|
|
||||||
|
ubuntu-latest-gcc-arm-v7:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
build: [Debug, Release]
|
||||||
|
arch: [linux/arm/v7]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Clone
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Build ${{ matrix.arch }}
|
||||||
|
run: |
|
||||||
|
docker run --platform ${{ matrix.arch }} --rm \
|
||||||
|
-v ${{ github.workspace }}:/workspace \
|
||||||
|
-w /workspace ${{ env.ubuntu_image }} /bin/sh -c '
|
||||||
|
set -e
|
||||||
|
apt update
|
||||||
|
apt install -y build-essential cmake libsdl2-dev
|
||||||
|
cmake . -DWHISPER_SDL2=ON -DCMAKE_BUILD_TYPE=${{ matrix.build }} -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv7-a+fp
|
||||||
|
make
|
||||||
|
ctest -L gh --output-on-failure'
|
||||||
|
|
||||||
ubuntu-latest-clang:
|
ubuntu-latest-clang:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
@ -101,7 +220,10 @@ jobs:
|
|||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
build: [Debug, Release]
|
build: [Debug, Release]
|
||||||
arch: [linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le]
|
#arch: [linux/amd64, linux/arm64, linux/arm/v7, linux/ppc64le]
|
||||||
|
# TODO: arm/v7 disabled due to clang bug
|
||||||
|
# https://github.com/ggerganov/whisper.cpp/actions/runs/9657764109/job/26637633042?pr=2256#step:4:1990
|
||||||
|
arch: [linux/amd64, linux/arm64, linux/ppc64le]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Clone
|
- name: Clone
|
||||||
@ -197,7 +319,7 @@ jobs:
|
|||||||
source /opt/intel/oneapi/setvars.sh
|
source /opt/intel/oneapi/setvars.sh
|
||||||
mkdir build
|
mkdir build
|
||||||
cd build
|
cd build
|
||||||
cmake -DWHISPER_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ..
|
cmake -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ..
|
||||||
cmake --build . --config Release -j $(nproc)
|
cmake --build . --config Release -j $(nproc)
|
||||||
|
|
||||||
ubuntu-22-cmake-sycl-fp16:
|
ubuntu-22-cmake-sycl-fp16:
|
||||||
@ -247,7 +369,7 @@ jobs:
|
|||||||
source /opt/intel/oneapi/setvars.sh
|
source /opt/intel/oneapi/setvars.sh
|
||||||
mkdir build
|
mkdir build
|
||||||
cd build
|
cd build
|
||||||
cmake -DWHISPER_SYCL_F16=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ..
|
cmake -DGGML_SYCL_F16=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ..
|
||||||
cmake --build . --config Release -j $(nproc)
|
cmake --build . --config Release -j $(nproc)
|
||||||
|
|
||||||
windows-msys2:
|
windows-msys2:
|
||||||
@ -276,25 +398,10 @@ jobs:
|
|||||||
mingw-w64-${{matrix.env}}-SDL2
|
mingw-w64-${{matrix.env}}-SDL2
|
||||||
mingw-w64-${{matrix.env}}-openblas
|
mingw-w64-${{matrix.env}}-openblas
|
||||||
|
|
||||||
- name: Build using make
|
|
||||||
shell: msys2 {0}
|
|
||||||
run: |
|
|
||||||
make -j $(nproc)
|
|
||||||
|
|
||||||
- name: Clean after building using make
|
|
||||||
shell: msys2 {0}
|
|
||||||
run: |
|
|
||||||
make clean
|
|
||||||
|
|
||||||
- name: Build using make w/ OpenBLAS
|
|
||||||
shell: msys2 {0}
|
|
||||||
run: |
|
|
||||||
make WHISPER_OPENBLAS=1 -j $(nproc)
|
|
||||||
|
|
||||||
- name: Build using CMake
|
- name: Build using CMake
|
||||||
shell: msys2 {0}
|
shell: msys2 {0}
|
||||||
run: |
|
run: |
|
||||||
cmake -B build
|
cmake -B build -DWHISPER_SDL2=ON
|
||||||
cmake --build build --config ${{ matrix.build }} -j $(nproc)
|
cmake --build build --config ${{ matrix.build }} -j $(nproc)
|
||||||
|
|
||||||
- name: Clean after building using CMake
|
- name: Clean after building using CMake
|
||||||
@ -305,7 +412,7 @@ jobs:
|
|||||||
- name: Build using CMake w/ OpenBLAS
|
- name: Build using CMake w/ OpenBLAS
|
||||||
shell: msys2 {0}
|
shell: msys2 {0}
|
||||||
run: |
|
run: |
|
||||||
cmake -B build -DWHISPER_OPENBLAS=ON
|
cmake -B build -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
|
||||||
cmake --build build --config ${{ matrix.build }} -j $(nproc)
|
cmake --build build --config ${{ matrix.build }} -j $(nproc)
|
||||||
|
|
||||||
windows:
|
windows:
|
||||||
@ -379,14 +486,9 @@ jobs:
|
|||||||
sdl2: [ON]
|
sdl2: [ON]
|
||||||
include:
|
include:
|
||||||
- arch: Win32
|
- arch: Win32
|
||||||
obzip: https://github.com/OpenMathLib/OpenBLAS/releases/download/v0.3.25/OpenBLAS-0.3.25-x86.zip
|
|
||||||
s2arc: x86
|
s2arc: x86
|
||||||
clblast: OFF
|
|
||||||
- arch: x64
|
- arch: x64
|
||||||
obzip: https://github.com/OpenMathLib/OpenBLAS/releases/download/v0.3.25/OpenBLAS-0.3.25-x64.zip
|
|
||||||
s2arc: x64
|
s2arc: x64
|
||||||
clblast: ON
|
|
||||||
clver: 1.6.1
|
|
||||||
- sdl2: ON
|
- sdl2: ON
|
||||||
s2ver: 2.28.5
|
s2ver: 2.28.5
|
||||||
|
|
||||||
@ -394,17 +496,21 @@ jobs:
|
|||||||
- name: Clone
|
- name: Clone
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Export GitHub Actions cache environment variables
|
||||||
|
uses: actions/github-script@v7
|
||||||
|
with:
|
||||||
|
script: |
|
||||||
|
core.exportVariable('ACTIONS_CACHE_URL', process.env.ACTIONS_CACHE_URL || '');
|
||||||
|
core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
|
||||||
|
|
||||||
- name: Add msbuild to PATH
|
- name: Add msbuild to PATH
|
||||||
uses: microsoft/setup-msbuild@v2
|
uses: microsoft/setup-msbuild@v2
|
||||||
|
|
||||||
- name: Fetch OpenBLAS
|
- name: Install OpenBLAS and pkgconfiglite
|
||||||
if: matrix.blas == 'ON'
|
if: matrix.blas == 'ON'
|
||||||
run: |
|
run: |
|
||||||
C:/msys64/usr/bin/wget.exe -qO blas.zip ${{ matrix.obzip }}
|
vcpkg install --triplet=${{ matrix.s2arc }}-windows openblas
|
||||||
7z x blas.zip -oblas -y
|
choco install pkgconfiglite
|
||||||
copy blas/include/cblas.h .
|
|
||||||
copy blas/include/openblas_config.h .
|
|
||||||
echo "OPENBLAS_PATH=$env:GITHUB_WORKSPACE/blas" >> $env:GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Fetch SDL2 and set SDL2_DIR
|
- name: Fetch SDL2 and set SDL2_DIR
|
||||||
if: matrix.sdl2 == 'ON'
|
if: matrix.sdl2 == 'ON'
|
||||||
@ -413,54 +519,37 @@ jobs:
|
|||||||
7z x sdl2.zip
|
7z x sdl2.zip
|
||||||
echo "SDL2_DIR=$env:GITHUB_WORKSPACE/SDL2-${{ matrix.s2ver }}/cmake" >> $env:GITHUB_ENV
|
echo "SDL2_DIR=$env:GITHUB_WORKSPACE/SDL2-${{ matrix.s2ver }}/cmake" >> $env:GITHUB_ENV
|
||||||
|
|
||||||
- name: Install OpenCL
|
|
||||||
if: matrix.clblast == 'ON'
|
|
||||||
run: vcpkg.exe --triplet=${{ matrix.arch }}-windows install opencl
|
|
||||||
|
|
||||||
- name: Fetch CLBlast and set CLBlast_DIR
|
|
||||||
if: matrix.clblast == 'ON'
|
|
||||||
run: |
|
|
||||||
C:/msys64/usr/bin/wget.exe -qO clblast.zip https://github.com/CNugteren/CLBlast/releases/download/${{ matrix.clver }}/CLBlast-${{ matrix.clver }}-windows-x64.zip
|
|
||||||
7z x clblast.zip
|
|
||||||
7z x CLBlast-${{ matrix.clver }}-windows-x64.7z
|
|
||||||
echo "CLBlast_DIR=$env:GITHUB_WORKSPACE/CLBlast-${{ matrix.clver }}-windows-x64/lib/cmake/CLBlast" >> $env:GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Configure
|
- name: Configure
|
||||||
run: >
|
run: >
|
||||||
cmake -S . -B ./build -A ${{ matrix.arch }}
|
cmake -S . -B ./build -A ${{ matrix.arch }}
|
||||||
|
-DCMAKE_TOOLCHAIN_FILE="$env:VCPKG_INSTALLATION_ROOT/scripts/buildsystems/vcpkg.cmake"
|
||||||
-DCMAKE_BUILD_TYPE=${{ matrix.build }}
|
-DCMAKE_BUILD_TYPE=${{ matrix.build }}
|
||||||
-DWHISPER_OPENBLAS=${{ matrix.blas }}
|
-DGGML_BLAS=${{ matrix.blas }}
|
||||||
-DCMAKE_LIBRARY_PATH="$env:OPENBLAS_PATH/lib"
|
-DGGML_BLAS_VENDOR=OpenBLAS
|
||||||
-DWHISPER_SDL2=${{ matrix.sdl2 }}
|
-DWHISPER_SDL2=${{ matrix.sdl2 }}
|
||||||
-DWHISPER_CLBLAST=${{ matrix.clblast }}
|
|
||||||
|
|
||||||
- name: Build
|
- name: Build
|
||||||
run: |
|
run: |
|
||||||
cd ./build
|
cd ./build
|
||||||
msbuild ALL_BUILD.vcxproj -t:build -p:configuration=${{ matrix.build }} -p:platform=${{ matrix.arch }}
|
msbuild ALL_BUILD.vcxproj -t:build -p:configuration=${{ matrix.build }} -p:platform=${{ matrix.arch }}
|
||||||
|
|
||||||
- name: Copy libopenblas.dll
|
- name: Copy openblas.dll
|
||||||
if: matrix.blas == 'ON'
|
if: matrix.blas == 'ON'
|
||||||
run: copy "$env:OPENBLAS_PATH/bin/libopenblas.dll" build/bin/${{ matrix.build }}
|
run: copy "C:/vcpkg/packages/openblas_${{ matrix.s2arc }}-windows/bin/openblas.dll" build/bin/${{ matrix.build }}
|
||||||
|
|
||||||
- name: Copy SDL2.dll
|
- name: Copy SDL2.dll
|
||||||
if: matrix.sdl2 == 'ON'
|
if: matrix.sdl2 == 'ON'
|
||||||
run: copy "$env:SDL2_DIR/../lib/${{ matrix.s2arc }}/SDL2.dll" build/bin/${{ matrix.build }}
|
run: copy "$env:SDL2_DIR/../lib/${{ matrix.s2arc }}/SDL2.dll" build/bin/${{ matrix.build }}
|
||||||
|
|
||||||
- name: Copy clblast.dll
|
|
||||||
if: matrix.clblast == 'ON'
|
|
||||||
run: copy "$env:CLBlast_DIR/../../clblast.dll" build/bin/${{ matrix.build }}
|
|
||||||
|
|
||||||
- name: Upload binaries
|
- name: Upload binaries
|
||||||
if: matrix.blas == 'ON' && matrix.sdl2 == 'ON'
|
if: matrix.blas == 'ON' && matrix.sdl2 == 'ON'
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: whisper-blas${{ matrix.clblast == 'ON' && '-clblast' || ''}}-bin-${{ matrix.arch }}
|
name: whisper-blas-bin-${{ matrix.arch }}
|
||||||
path: build/bin/${{ matrix.build }}
|
path: build/bin/${{ matrix.build }}
|
||||||
|
|
||||||
windows-cublas:
|
windows-cublas:
|
||||||
runs-on: windows-2019
|
runs-on: windows-2019
|
||||||
|
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
build: [Release]
|
build: [Release]
|
||||||
@ -470,12 +559,10 @@ jobs:
|
|||||||
cuda-toolkit: [12.2.0, 11.8.0]
|
cuda-toolkit: [12.2.0, 11.8.0]
|
||||||
include:
|
include:
|
||||||
- arch: x64
|
- arch: x64
|
||||||
s2arc: x64
|
sdl2: ON
|
||||||
- sdl2: ON
|
sdl2_ver: 2.28.5
|
||||||
s2ver: 2.28.5
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Clone
|
- name: Clone repository
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Add msbuild to PATH
|
- name: Add msbuild to PATH
|
||||||
@ -487,38 +574,43 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
cuda: '${{ matrix.cuda-toolkit }}'
|
cuda: '${{ matrix.cuda-toolkit }}'
|
||||||
|
|
||||||
|
- name: Install 7-Zip
|
||||||
|
run: choco install 7zip -y
|
||||||
|
|
||||||
- name: Fetch SDL2 and set SDL2_DIR
|
- name: Fetch SDL2 and set SDL2_DIR
|
||||||
if: matrix.sdl2 == 'ON'
|
if: matrix.sdl2 == 'ON'
|
||||||
run: |
|
run: |
|
||||||
C:/msys64/usr/bin/wget.exe -qO sdl2.zip https://github.com/libsdl-org/SDL/releases/download/release-${{ matrix.s2ver }}/SDL2-devel-${{ matrix.s2ver }}-VC.zip
|
Invoke-WebRequest -Uri https://github.com/libsdl-org/SDL/releases/download/release-${{ matrix.sdl2_ver }}/SDL2-devel-${{ matrix.sdl2_ver }}-VC.zip -OutFile sdl2.zip
|
||||||
7z x sdl2.zip
|
7z x sdl2.zip
|
||||||
echo "SDL2_DIR=$env:GITHUB_WORKSPACE/SDL2-${{ matrix.s2ver }}/cmake" >> $env:GITHUB_ENV
|
echo "SDL2_DIR=${{ github.workspace }}\SDL2-${{ matrix.sdl2_ver }}\cmake" | Out-File -FilePath $env:GITHUB_ENV -Append
|
||||||
|
echo "${{ github.workspace }}\SDL2-${{ matrix.sdl2_ver }}\cmake" > SDL2_PATH.txt
|
||||||
- name: Configure
|
|
||||||
run: >
|
- name: Configure CMake
|
||||||
cmake -S . -B ./build -A ${{ matrix.arch }}
|
shell: cmd
|
||||||
-DCMAKE_BUILD_TYPE=${{ matrix.build }}
|
run: |
|
||||||
-DWHISPER_CUDA=${{ matrix.cublas }}
|
cmake -S . -B ./build -A ${{ matrix.arch }} ^
|
||||||
-DWHISPER_SDL2=${{ matrix.sdl2 }}
|
-DCMAKE_BUILD_TYPE=${{ matrix.build }} ^
|
||||||
|
-DGGML_CUDA=${{ matrix.cublas }} ^
|
||||||
- name: Build ${{ matrix.cuda-toolkit }}
|
-DCMAKE_CUDA_ARCHITECTURES=all ^
|
||||||
|
-DWHISPER_SDL2=${{ matrix.sdl2 }} ^
|
||||||
|
-DSDL2_DIR="%SDL2_DIR%"
|
||||||
|
|
||||||
|
- name: Build Project
|
||||||
|
shell: cmd
|
||||||
run: |
|
run: |
|
||||||
cd ./build
|
cd ./build
|
||||||
cmake --build . --config ${{ matrix.build }}
|
cmake --build . --config ${{ matrix.build }}
|
||||||
|
|
||||||
- name: Copy CUDA DLLs
|
- name: Copy CUDA DLLs
|
||||||
run: >
|
run: |
|
||||||
Copy-Item -PassThru
|
Get-ChildItem "${{ steps.cuda-toolkit.outputs.CUDA_PATH }}/bin/" -Filter "*.dll" |
|
||||||
-Path "${{ steps.cuda-toolkit.outputs.CUDA_PATH }}/bin/*.dll"
|
Copy-Item -Destination "build/bin/${{ matrix.build }}"
|
||||||
-Include cudart64_*,cublas64_*,cublasLt64_*
|
|
||||||
-Destination build/bin/${{ matrix.build }}
|
|
||||||
|
|
||||||
- name: Copy SDL2.dll
|
- name: Copy SDL2.dll
|
||||||
if: matrix.sdl2 == 'ON'
|
if: matrix.sdl2 == 'ON'
|
||||||
run: copy "$env:SDL2_DIR/../lib/${{ matrix.s2arc }}/SDL2.dll" build/bin/${{ matrix.build }}
|
run: copy "$env:SDL2_DIR/../lib/${{ matrix.arch }}/SDL2.dll" build/bin/${{ matrix.build }}
|
||||||
|
|
||||||
- name: Upload binaries
|
- name: Upload binaries
|
||||||
if: matrix.sdl2 == 'ON'
|
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: whisper-cublas-${{ matrix.cuda-toolkit }}-bin-${{ matrix.arch }}
|
name: whisper-cublas-${{ matrix.cuda-toolkit }}-bin-${{ matrix.arch }}
|
||||||
@ -546,7 +638,7 @@ jobs:
|
|||||||
emcmake cmake . -DCMAKE_BUILD_TYPE=${{ matrix.build }}
|
emcmake cmake . -DCMAKE_BUILD_TYPE=${{ matrix.build }}
|
||||||
make
|
make
|
||||||
|
|
||||||
ios:
|
ios-xcode-build:
|
||||||
runs-on: macos-latest
|
runs-on: macos-latest
|
||||||
|
|
||||||
strategy:
|
strategy:
|
||||||
@ -554,7 +646,7 @@ jobs:
|
|||||||
build: [Release]
|
build: [Release]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Clone
|
- name: Checkout code
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Configure
|
- name: Configure
|
||||||
@ -562,11 +654,34 @@ jobs:
|
|||||||
cp models/for-tests-ggml-base.en.bin models/ggml-base.en.bin
|
cp models/for-tests-ggml-base.en.bin models/ggml-base.en.bin
|
||||||
mkdir models/ggml-base.en-encoder.mlmodelc
|
mkdir models/ggml-base.en-encoder.mlmodelc
|
||||||
|
|
||||||
- name: Build objc example
|
- name: Build
|
||||||
run: xcodebuild -project examples/whisper.objc/whisper.objc.xcodeproj -scheme whisper.objc -configuration ${{ matrix.build }} -sdk iphonesimulator build
|
id: cmake_build
|
||||||
|
run: |
|
||||||
|
sysctl -a
|
||||||
|
mkdir build
|
||||||
|
cd build
|
||||||
|
cmake -G Xcode .. \
|
||||||
|
-DGGML_METAL_USE_BF16=ON \
|
||||||
|
-DGGML_METAL_EMBED_LIBRARY=ON \
|
||||||
|
-DWHISPER_BUILD_EXAMPLES=OFF \
|
||||||
|
-DWHISPER_BUILD_TESTS=OFF \
|
||||||
|
-DWHISPER_BUILD_SERVER=OFF \
|
||||||
|
-DCMAKE_SYSTEM_NAME=iOS \
|
||||||
|
-DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \
|
||||||
|
-DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml
|
||||||
|
cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO
|
||||||
|
sudo cmake --install . --config Release
|
||||||
|
|
||||||
|
- name: xcodebuild for swift package
|
||||||
|
id: xcodebuild
|
||||||
|
run: |
|
||||||
|
xcodebuild -scheme whisper-Package -destination 'generic/platform=iOS'
|
||||||
|
|
||||||
|
#- name: Build objc example
|
||||||
|
# run: xcodebuild -project examples/whisper.objc/whisper.objc.xcodeproj -scheme whisper.objc -configuration ${{ matrix.build }} -sdk iphoneos build
|
||||||
|
|
||||||
- name: Build swiftui example
|
- name: Build swiftui example
|
||||||
run: xcodebuild -project examples/whisper.swiftui/whisper.swiftui.xcodeproj -scheme WhisperCppDemo -configuration ${{ matrix.build }} -sdk iphonesimulator build
|
run: xcodebuild -project examples/whisper.swiftui/whisper.swiftui.xcodeproj -scheme WhisperCppDemo -configuration ${{ matrix.build }} -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' build
|
||||||
|
|
||||||
android:
|
android:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
@ -577,12 +692,6 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
path: whisper
|
path: whisper
|
||||||
|
|
||||||
- name: Clone
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
repository: ggerganov/ggml
|
|
||||||
path: ggml
|
|
||||||
|
|
||||||
- name: Install Java
|
- name: Install Java
|
||||||
uses: actions/setup-java@v4
|
uses: actions/setup-java@v4
|
||||||
with:
|
with:
|
||||||
@ -601,75 +710,77 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
export PATH_TO_GGML=$PWD/ggml
|
export PATH_TO_GGML=$PWD/ggml
|
||||||
cd whisper/examples/whisper.android
|
cd whisper/examples/whisper.android
|
||||||
./gradlew assembleRelease --no-daemon -PGGML_HOME=$PATH_TO_GGML
|
./gradlew assembleRelease --no-daemon
|
||||||
|
|
||||||
android_java:
|
# TODO: disable because of following fail: https://github.com/ggerganov/whisper.cpp/actions/runs/11019444420/job/30627193602
|
||||||
runs-on: ubuntu-latest
|
# android_java:
|
||||||
|
# runs-on: ubuntu-latest
|
||||||
|
#
|
||||||
|
# steps:
|
||||||
|
# - name: Clone
|
||||||
|
# uses: actions/checkout@v4
|
||||||
|
#
|
||||||
|
# - name: set up JDK 11
|
||||||
|
# uses: actions/setup-java@v4
|
||||||
|
# with:
|
||||||
|
# java-version: '11'
|
||||||
|
# distribution: 'temurin'
|
||||||
|
# cache: gradle
|
||||||
|
#
|
||||||
|
# - name: Setup Android SDK
|
||||||
|
# uses: android-actions/setup-android@v3
|
||||||
|
# with:
|
||||||
|
# cmdline-tools-version: 9.0
|
||||||
|
#
|
||||||
|
# - name: Build
|
||||||
|
# run: |
|
||||||
|
# cd examples/whisper.android.java
|
||||||
|
# chmod +x ./gradlew
|
||||||
|
# ./gradlew assembleRelease
|
||||||
|
|
||||||
steps:
|
# TODO: disabled because of following fail: https://github.com/ggerganov/whisper.cpp/actions/runs/9686220096/job/26735899598
|
||||||
- name: Clone
|
# java:
|
||||||
uses: actions/checkout@v4
|
# needs: [ 'windows' ]
|
||||||
|
# runs-on: windows-latest
|
||||||
- name: set up JDK 11
|
# steps:
|
||||||
uses: actions/setup-java@v4
|
# - uses: actions/checkout@v4
|
||||||
with:
|
#
|
||||||
java-version: '11'
|
# - name: Install Java
|
||||||
distribution: 'temurin'
|
# uses: actions/setup-java@v4
|
||||||
cache: gradle
|
# with:
|
||||||
|
# distribution: zulu
|
||||||
- name: Setup Android SDK
|
# java-version: 20
|
||||||
uses: android-actions/setup-android@v3
|
#
|
||||||
with:
|
# - name: Download Windows lib
|
||||||
cmdline-tools-version: 9.0
|
# uses: actions/download-artifact@v4
|
||||||
|
# with:
|
||||||
- name: Build
|
# name: win32-x86-64_whisper.dll
|
||||||
run: |
|
# path: bindings/java/build/generated/resources/main/win32-x86-64
|
||||||
cd examples/whisper.android.java
|
#
|
||||||
chmod +x ./gradlew
|
# - name: Build
|
||||||
./gradlew assembleRelease
|
# run: |
|
||||||
|
# models\download-ggml-model.cmd tiny.en
|
||||||
java:
|
# cd bindings/java
|
||||||
needs: [ 'windows' ]
|
# chmod +x ./gradlew
|
||||||
runs-on: windows-latest
|
# ./gradlew build
|
||||||
steps:
|
#
|
||||||
- uses: actions/checkout@v4
|
# - name: Upload jar
|
||||||
|
# uses: actions/upload-artifact@v4
|
||||||
- name: Install Java
|
# with:
|
||||||
uses: actions/setup-java@v4
|
# name: whispercpp.jar
|
||||||
with:
|
# path: bindings/java/build/libs/whispercpp-*.jar
|
||||||
distribution: zulu
|
#
|
||||||
java-version: 20
|
# - name: Publish package
|
||||||
|
# if: ${{ github.ref == 'refs/heads/master' }}
|
||||||
- name: Download Windows lib
|
# uses: gradle/gradle-build-action@v2.4.2
|
||||||
uses: actions/download-artifact@v4
|
# with:
|
||||||
with:
|
# arguments: publish
|
||||||
name: win32-x86-64_whisper.dll
|
# build-root-directory: bindings/java
|
||||||
path: bindings/java/build/generated/resources/main/win32-x86-64
|
# env:
|
||||||
|
# MAVEN_USERNAME: ${{ secrets.JIRA_USER }}
|
||||||
- name: Build
|
# MAVEN_PASSWORD: ${{ secrets.JIRA_PASS }}
|
||||||
run: |
|
# PGP_SECRET: ${{ secrets.GPG_PRIVATE_KEY }}
|
||||||
models\download-ggml-model.cmd tiny.en
|
# PGP_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
|
||||||
cd bindings/java
|
|
||||||
chmod +x ./gradlew
|
|
||||||
./gradlew build
|
|
||||||
|
|
||||||
- name: Upload jar
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: whispercpp.jar
|
|
||||||
path: bindings/java/build/libs/whispercpp-*.jar
|
|
||||||
|
|
||||||
- name: Publish package
|
|
||||||
if: ${{ github.ref == 'refs/heads/master' }}
|
|
||||||
uses: gradle/gradle-build-action@v2.4.2
|
|
||||||
with:
|
|
||||||
arguments: publish
|
|
||||||
build-root-directory: bindings/java
|
|
||||||
env:
|
|
||||||
MAVEN_USERNAME: ${{ secrets.JIRA_USER }}
|
|
||||||
MAVEN_PASSWORD: ${{ secrets.JIRA_PASS }}
|
|
||||||
PGP_SECRET: ${{ secrets.GPG_PRIVATE_KEY }}
|
|
||||||
PGP_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
|
|
||||||
|
|
||||||
quantize:
|
quantize:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
@ -681,5 +792,6 @@ jobs:
|
|||||||
- name: Test quantize
|
- name: Test quantize
|
||||||
run: |
|
run: |
|
||||||
./models/download-ggml-model.sh tiny.en
|
./models/download-ggml-model.sh tiny.en
|
||||||
make quantize
|
cmake -B build
|
||||||
./quantize models/ggml-tiny.en.bin models/ggml-tiny.en-q4_0.bin q4_0
|
cmake --build build --config Release
|
||||||
|
./build/bin/quantize models/ggml-tiny.en.bin models/ggml-tiny.en-q4_0.bin q4_0
|
||||||
|
10
.github/workflows/docker.yml
vendored
10
.github/workflows/docker.yml
vendored
@ -17,8 +17,10 @@ jobs:
|
|||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
config:
|
config:
|
||||||
- { tag: "main", dockerfile: ".devops/main.Dockerfile", platform: "linux/amd64,linux/arm64" }
|
- { tag: "main", dockerfile: ".devops/main.Dockerfile", platform: "linux/amd64" }
|
||||||
- { tag: "main-cuda", dockerfile: ".devops/main-cuda.Dockerfile", platform: "linux/amd64" }
|
#TODO: the cuda image keeps failing - disable for now
|
||||||
|
# https://github.com/ggerganov/whisper.cpp/actions/runs/11019444428/job/30602020339
|
||||||
|
#- { tag: "main-cuda", dockerfile: ".devops/main-cuda.Dockerfile", platform: "linux/amd64" }
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Check out the repo
|
- name: Check out the repo
|
||||||
@ -43,7 +45,7 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
push: true
|
push: true
|
||||||
platforms: ${{ matrix.config.platforms }}
|
platforms: ${{ matrix.config.platform }}
|
||||||
tags: "ghcr.io/${{ github.repository }}:${{ matrix.config.tag }}-${{ env.COMMIT_SHA }}"
|
tags: "ghcr.io/${{ github.repository }}:${{ matrix.config.tag }}-${{ env.COMMIT_SHA }}"
|
||||||
file: ${{ matrix.config.dockerfile }}
|
file: ${{ matrix.config.dockerfile }}
|
||||||
|
|
||||||
@ -52,6 +54,6 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
push: ${{ github.event_name == 'push' }}
|
push: ${{ github.event_name == 'push' }}
|
||||||
platforms: ${{ matrix.config.platforms }}
|
platforms: ${{ matrix.config.platform }}
|
||||||
tags: "ghcr.io/${{ github.repository }}:${{ matrix.config.tag }}"
|
tags: "ghcr.io/${{ github.repository }}:${{ matrix.config.tag }}"
|
||||||
file: ${{ matrix.config.dockerfile }}
|
file: ${{ matrix.config.dockerfile }}
|
||||||
|
17
.gitignore
vendored
17
.gitignore
vendored
@ -1,31 +1,28 @@
|
|||||||
*.o
|
*.o
|
||||||
*.a
|
*.a
|
||||||
|
*.d
|
||||||
.cache/
|
.cache/
|
||||||
.coreml/
|
.coreml/
|
||||||
.test/
|
.test/
|
||||||
|
.venv/
|
||||||
.vs/
|
.vs/
|
||||||
.vscode/
|
.vscode/
|
||||||
.DS_Store
|
.DS_Store
|
||||||
.vimspector.json
|
.vimspector.json
|
||||||
/CMakeSettings.json
|
/CMakeSettings.json
|
||||||
|
/talk-llama.dSYM/
|
||||||
|
|
||||||
build/
|
build/
|
||||||
build-coreml/
|
build-*/
|
||||||
build-em/
|
|
||||||
build-debug/
|
|
||||||
build-release/
|
|
||||||
build-rwdi/
|
|
||||||
build-static/
|
|
||||||
build-cublas/
|
|
||||||
build-no-accel/
|
|
||||||
build-sanitize-addr/
|
|
||||||
build-sanitize-thread/
|
|
||||||
|
|
||||||
# SPM
|
# SPM
|
||||||
.build/
|
.build/
|
||||||
.swiftpm
|
.swiftpm
|
||||||
*.metallib
|
*.metallib
|
||||||
|
|
||||||
|
ggml-metal-embed.metal
|
||||||
|
ggml-metal-embed.metal.tmp
|
||||||
|
|
||||||
/main
|
/main
|
||||||
/stream
|
/stream
|
||||||
/command
|
/command
|
||||||
|
3
.gitmodules
vendored
3
.gitmodules
vendored
@ -1,3 +0,0 @@
|
|||||||
[submodule "bindings/ios"]
|
|
||||||
path = bindings/ios
|
|
||||||
url = https://github.com/ggerganov/whisper.spm
|
|
||||||
|
851
CMakeLists.txt
851
CMakeLists.txt
@ -1,25 +1,31 @@
|
|||||||
cmake_minimum_required (VERSION 3.5)
|
cmake_minimum_required(VERSION 3.5) # for add_link_options and implicit target directories.
|
||||||
|
project("whisper.cpp" C CXX)
|
||||||
|
project("whisper.cpp" VERSION 1.7.4)
|
||||||
|
include(CheckIncludeFileCXX)
|
||||||
|
|
||||||
# Allow for the creation of solution folders.
|
|
||||||
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
|
|
||||||
|
|
||||||
project(whisper.cpp VERSION 1.6.2)
|
|
||||||
set(SOVERSION 1)
|
set(SOVERSION 1)
|
||||||
|
|
||||||
|
#set(CMAKE_WARN_DEPRECATED YES)
|
||||||
|
set(CMAKE_WARN_UNUSED_CLI YES)
|
||||||
|
|
||||||
|
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
|
||||||
|
|
||||||
|
if (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE)
|
||||||
|
set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
|
||||||
|
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo")
|
||||||
|
endif()
|
||||||
|
|
||||||
# Add path to modules
|
# Add path to modules
|
||||||
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")
|
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")
|
||||||
|
|
||||||
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
|
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
|
||||||
|
|
||||||
if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
|
if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
|
||||||
set(WHISPER_STANDALONE ON)
|
set(WHISPER_STANDALONE ON)
|
||||||
include(GitVars)
|
|
||||||
include(BuildTypes)
|
include(git-vars)
|
||||||
|
|
||||||
# configure project version
|
# configure project version
|
||||||
if (EXISTS "${CMAKE_SOURCE_DIR}/bindings/ios/Makefile-tmpl")
|
|
||||||
configure_file(${CMAKE_SOURCE_DIR}/bindings/ios/Makefile-tmpl ${CMAKE_SOURCE_DIR}/bindings/ios/Makefile @ONLY)
|
|
||||||
endif()
|
|
||||||
configure_file(${CMAKE_SOURCE_DIR}/bindings/javascript/package-tmpl.json ${CMAKE_SOURCE_DIR}/bindings/javascript/package.json @ONLY)
|
configure_file(${CMAKE_SOURCE_DIR}/bindings/javascript/package-tmpl.json ${CMAKE_SOURCE_DIR}/bindings/javascript/package.json @ONLY)
|
||||||
else()
|
else()
|
||||||
set(WHISPER_STANDALONE OFF)
|
set(WHISPER_STANDALONE OFF)
|
||||||
@ -29,6 +35,11 @@ if (EMSCRIPTEN)
|
|||||||
set(BUILD_SHARED_LIBS_DEFAULT OFF)
|
set(BUILD_SHARED_LIBS_DEFAULT OFF)
|
||||||
|
|
||||||
option(WHISPER_WASM_SINGLE_FILE "whisper: embed WASM inside the generated whisper.js" ON)
|
option(WHISPER_WASM_SINGLE_FILE "whisper: embed WASM inside the generated whisper.js" ON)
|
||||||
|
|
||||||
|
# TODO: without these, we get the following error:
|
||||||
|
# wasm-ld: error: --shared-memory is disallowed by whisper.cpp.o because it was not compiled with 'atomics' or 'bulk-memory' features.
|
||||||
|
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pthread -s TOTAL_STACK=5242880")
|
||||||
|
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread -s TOTAL_STACK=5242880")
|
||||||
else()
|
else()
|
||||||
if (MINGW)
|
if (MINGW)
|
||||||
set(BUILD_SHARED_LIBS_DEFAULT OFF)
|
set(BUILD_SHARED_LIBS_DEFAULT OFF)
|
||||||
@ -37,756 +48,136 @@ else()
|
|||||||
endif()
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
# options
|
option(BUILD_SHARED_LIBS "build shared libraries" ${BUILD_SHARED_LIBS_DEFAULT})
|
||||||
|
|
||||||
if (APPLE)
|
#
|
||||||
set(WHISPER_METAL_DEFAULT ON)
|
# option list
|
||||||
else()
|
#
|
||||||
set(WHISPER_METAL_DEFAULT OFF)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
option(BUILD_SHARED_LIBS "whisper: build shared libs" ${BUILD_SHARED_LIBS_DEFAULT})
|
# general
|
||||||
|
option(WHISPER_CCACHE "whisper: use ccache if available" ON)
|
||||||
|
|
||||||
|
# debug
|
||||||
option(WHISPER_ALL_WARNINGS "whisper: enable all compiler warnings" ON)
|
option(WHISPER_ALL_WARNINGS "whisper: enable all compiler warnings" ON)
|
||||||
option(WHISPER_ALL_WARNINGS_3RD_PARTY "whisper: enable all compiler warnings in 3rd party libs" OFF)
|
option(WHISPER_ALL_WARNINGS_3RD_PARTY "whisper: enable all compiler warnings in 3rd party libs" OFF)
|
||||||
|
|
||||||
option(WHISPER_SANITIZE_THREAD "whisper: enable thread sanitizer" OFF)
|
# build
|
||||||
option(WHISPER_SANITIZE_ADDRESS "whisper: enable address sanitizer" OFF)
|
option(WHISPER_FATAL_WARNINGS "whisper: enable -Werror flag" OFF)
|
||||||
option(WHISPER_SANITIZE_UNDEFINED "whisper: enable undefined sanitizer" OFF)
|
|
||||||
|
|
||||||
option(WHISPER_BUILD_TESTS "whisper: build tests" ${WHISPER_STANDALONE})
|
|
||||||
option(WHISPER_BUILD_EXAMPLES "whisper: build examples" ${WHISPER_STANDALONE})
|
|
||||||
|
|
||||||
option(WHISPER_SDL2 "whisper: support for libSDL2" OFF)
|
|
||||||
|
|
||||||
if (CMAKE_SYSTEM_NAME MATCHES "Linux")
|
|
||||||
option(WHISPER_FFMPEG "whisper: support building and linking with ffmpeg libs (avcodec, swresample, ...)" OFF)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
option(WHISPER_NO_AVX "whisper: disable AVX" OFF)
|
|
||||||
option(WHISPER_NO_AVX2 "whisper: disable AVX2" OFF)
|
|
||||||
option(WHISPER_NO_AVX512 "whisper: disable AVX512" ON)
|
|
||||||
option(WHISPER_NO_AVX512_VBMI "whisper: disable AVX512-VBMI" ON)
|
|
||||||
option(WHISPER_NO_AVX512_VNNI "whisper: disable AVX512-VNNI" ON)
|
|
||||||
option(WHISPER_NO_FMA "whisper: disable FMA" OFF)
|
|
||||||
option(WHISPER_NO_F16C "whisper: disable F16c" OFF)
|
|
||||||
|
|
||||||
option(WHISPER_OPENVINO "whisper: support for OpenVINO" OFF)
|
|
||||||
|
|
||||||
if (APPLE)
|
|
||||||
option(WHISPER_NO_ACCELERATE "whisper: disable Accelerate framework" OFF)
|
|
||||||
option(WHISPER_METAL "whisper: use Metal" ${WHISPER_METAL_DEFAULT})
|
|
||||||
option(WHISPER_METAL_NDEBUG "whisper: disable Metal debugging" OFF)
|
|
||||||
option(WHISPER_COREML "whisper: enable Core ML framework" OFF)
|
|
||||||
option(WHISPER_COREML_ALLOW_FALLBACK "whisper: allow non-CoreML fallback" OFF)
|
|
||||||
option(WHISPER_METAL_EMBED_LIBRARY "whisper: embed Metal library" OFF)
|
|
||||||
else()
|
|
||||||
option(WHISPER_BLAS "whisper: use BLAS libraries" OFF)
|
|
||||||
option(WHISPER_BLAS_VENDOR "whisper: BLAS library vendor" Generic)
|
|
||||||
option(WHISPER_OPENBLAS "whisper: prefer OpenBLAS" OFF)
|
|
||||||
option(WHISPER_OPENBLAS_INTERFACE64 "whisper: use OpenBLAS w/ 64-bit interface" OFF)
|
|
||||||
option(WHISPER_CUDA "whisper: support for CUDA" OFF)
|
|
||||||
option(WHISPER_CUBLAS "whisper: support for CUDA (deprecated)" OFF)
|
|
||||||
option(WHISPER_HIPBLAS "whisper: support for hipBLAS" OFF)
|
|
||||||
option(WHISPER_CLBLAST "whisper: use CLBlast" OFF)
|
|
||||||
option(WHISPER_MKL "whisper: use Intel Math Kernel Library (MKL)" OFF)
|
|
||||||
option(WHISPER_SYCL "whisper: use SYCL" OFF)
|
|
||||||
option(WHISPER_SYCL_F16 "whisper: use 16 bit floats for sycl calculations" OFF)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
option(WHISPER_PERF "whisper: enable perf timings" OFF)
|
|
||||||
|
|
||||||
# sanitizers
|
# sanitizers
|
||||||
|
option(WHISPER_SANITIZE_THREAD "whisper: enable thread sanitizer" OFF)
|
||||||
|
option(WHISPER_SANITIZE_ADDRESS "whisper: enable address sanitizer" OFF)
|
||||||
|
option(WHISPER_SANITIZE_UNDEFINED "whisper: enable undefined sanitizer" OFF)
|
||||||
|
|
||||||
if (NOT MSVC)
|
# extra artifacts
|
||||||
if (WHISPER_SANITIZE_THREAD)
|
option(WHISPER_BUILD_TESTS "whisper: build tests" ${WHISPER_STANDALONE})
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=thread")
|
option(WHISPER_BUILD_EXAMPLES "whisper: build examples" ${WHISPER_STANDALONE})
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=thread")
|
option(WHISPER_BUILD_SERVER "whisper: build server example" ${WHISPER_STANDALONE})
|
||||||
endif()
|
|
||||||
|
|
||||||
if (WHISPER_SANITIZE_ADDRESS)
|
# 3rd party libs
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=address -fno-omit-frame-pointer")
|
option(WHISPER_CURL "whisper: use libcurl to download model from an URL" OFF)
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address -fno-omit-frame-pointer")
|
option(WHISPER_SDL2 "whisper: support for libSDL2" OFF)
|
||||||
endif()
|
|
||||||
|
|
||||||
if (WHISPER_SANITIZE_UNDEFINED)
|
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=undefined")
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined")
|
|
||||||
endif()
|
|
||||||
endif()
|
|
||||||
|
|
||||||
#set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -ffast-math")
|
|
||||||
#set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=native")
|
|
||||||
|
|
||||||
# dependencies
|
|
||||||
|
|
||||||
find_package(Threads REQUIRED)
|
|
||||||
|
|
||||||
#compile flag sycl
|
|
||||||
if (WHISPER_SYCL)
|
|
||||||
set(CMAKE_CXX_STANDARD 17)
|
|
||||||
else()
|
|
||||||
set(CMAKE_CXX_STANDARD 11)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (WHISPER_FFMPEG)
|
|
||||||
# As of cmake 3.27, there is no official cmake support for FindFFmpeg.
|
|
||||||
# Consequnelty we added a FindFFmpeg.cmake script the cmake subfolder:
|
|
||||||
# whisper.cpp does not need the full ffmpeg libs, just AVFORMAT AVCODEC AVUTIL SWRESAMPLE
|
|
||||||
# libswresample performs highly optimized audio resampling, rematrixing and sample format conversion operations
|
|
||||||
# libavcodec provides a generic encoding/decoding framework and contains multiple decoders and encoders for audio, video and subtitle streams, and several bitstream filters.
|
|
||||||
# libavformat provides a generic framework for multiplexing and demultiplexing (muxing and demuxing) audio, video and subtitle streams.
|
|
||||||
find_package(FFmpeg REQUIRED)
|
|
||||||
if (NOT ${FFMPEG_FOUND})
|
|
||||||
message(FATAL_ERROR "Cannot find ffmpeg libs/headers")
|
|
||||||
endif()
|
|
||||||
message(STATUS "Found ffmpeg libs: ${FFMPEG_LIBRARIES}")
|
|
||||||
message(STATUS "Found ffmpeg headers in: ${FFMPEG_INCLUDE_DIRS}")
|
|
||||||
message(STATUS "ffmpeg definitions: ${FFMPEG_DEFINITIONS}")
|
|
||||||
message(STATUS "Found avformat ${AVFORMAT_VERSION}")
|
|
||||||
include_directories(${FFMPEG_INCLUDE_DIRS})
|
|
||||||
add_compile_definitions(WHISPER_FFMPEG)
|
|
||||||
set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} ${FFMPEG_LIBRARIES})
|
|
||||||
endif()
|
|
||||||
|
|
||||||
# on APPLE
|
|
||||||
if (APPLE)
|
|
||||||
# include Accelerate framework
|
|
||||||
if (NOT WHISPER_NO_ACCELERATE)
|
|
||||||
find_library(ACCELERATE_FRAMEWORK Accelerate)
|
|
||||||
|
|
||||||
if (ACCELERATE_FRAMEWORK)
|
|
||||||
message(STATUS "Accelerate framework found")
|
|
||||||
|
|
||||||
set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} ${ACCELERATE_FRAMEWORK})
|
|
||||||
set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_USE_ACCELERATE -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64)
|
|
||||||
else()
|
|
||||||
message(FATAL_ERROR "Accelerate framework not found")
|
|
||||||
endif()
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (WHISPER_METAL)
|
|
||||||
find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
|
|
||||||
find_library(METAL_FRAMEWORK Metal REQUIRED)
|
|
||||||
find_library(METALKIT_FRAMEWORK MetalKit REQUIRED)
|
|
||||||
|
|
||||||
if (METAL_FRAMEWORK)
|
|
||||||
message(STATUS "Metal framework found")
|
|
||||||
|
|
||||||
set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS}
|
|
||||||
${FOUNDATION_LIBRARY}
|
|
||||||
${METAL_FRAMEWORK}
|
|
||||||
${METALKIT_FRAMEWORK}
|
|
||||||
)
|
|
||||||
set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_USE_METAL)
|
|
||||||
|
|
||||||
if (WHISPER_METAL_NDEBUG)
|
|
||||||
set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_METAL_NDEBUG)
|
|
||||||
endif()
|
|
||||||
else()
|
|
||||||
message(FATAL_ERROR "Metal framework not found")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
set(GGML_SOURCES_METAL ggml-metal.m ggml-metal.h)
|
|
||||||
|
|
||||||
# copy ggml-common.h and ggml-metal.metal to bin directory
|
|
||||||
configure_file(ggml-common.h bin/ggml-common.h COPYONLY)
|
|
||||||
configure_file(ggml-metal.metal bin/ggml-metal.metal COPYONLY)
|
|
||||||
|
|
||||||
if (WHISPER_METAL_EMBED_LIBRARY)
|
|
||||||
enable_language(ASM)
|
|
||||||
set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_METAL_EMBED_LIBRARY)
|
|
||||||
|
|
||||||
set(METALLIB_SOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal")
|
|
||||||
set(COMMON_HEADER "${CMAKE_CURRENT_SOURCE_DIR}/ggml-common.h")
|
|
||||||
|
|
||||||
file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/autogenerated")
|
|
||||||
set(EMBED_METALLIB_ASSEMBLY "${CMAKE_BINARY_DIR}/autogenerated/ggml-embed-metallib.s")
|
|
||||||
set(EMBED_METALLIB_SOURCE "${CMAKE_BINARY_DIR}/autogenerated/ggml-metal-combined.metal")
|
|
||||||
|
|
||||||
add_custom_command(
|
|
||||||
OUTPUT ${EMBED_METALLIB_SOURCE}
|
|
||||||
COMMAND sed -e "/^#include \\\"ggml-common.h\\\"/r ${COMMON_HEADER}" -e "/^#include \\\"ggml-common.h\\\"/d" ${METALLIB_SOURCE} > ${EMBED_METALLIB_SOURCE}
|
|
||||||
DEPENDS ${METALLIB_SOURCE} ${COMMON_HEADER}
|
|
||||||
COMMENT "Generating combined Metal library for embedding"
|
|
||||||
)
|
|
||||||
|
|
||||||
add_custom_command(
|
|
||||||
OUTPUT ${EMBED_METALLIB_ASSEMBLY}
|
|
||||||
COMMAND echo ".section __DATA,__ggml_metallib" > ${EMBED_METALLIB_ASSEMBLY}
|
|
||||||
COMMAND echo ".globl _ggml_metallib_start" >> ${EMBED_METALLIB_ASSEMBLY}
|
|
||||||
COMMAND echo "_ggml_metallib_start:" >> ${EMBED_METALLIB_ASSEMBLY}
|
|
||||||
COMMAND echo ".incbin \\\"${EMBED_METALLIB_SOURCE}\\\"" >> ${EMBED_METALLIB_ASSEMBLY}
|
|
||||||
COMMAND echo ".globl _ggml_metallib_end" >> ${EMBED_METALLIB_ASSEMBLY}
|
|
||||||
COMMAND echo "_ggml_metallib_end:" >> ${EMBED_METALLIB_ASSEMBLY}
|
|
||||||
DEPENDS ${EMBED_METALLIB_SOURCE}
|
|
||||||
COMMENT "Generate assembly for embedded Metal library"
|
|
||||||
)
|
|
||||||
|
|
||||||
set(GGML_SOURCES_METAL ${GGML_SOURCES_METAL} ${EMBED_METALLIB_ASSEMBLY})
|
|
||||||
endif()
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (WHISPER_COREML)
|
|
||||||
find_library(FOUNDATION_FRAMEWORK Foundation)
|
|
||||||
find_library(COREML_FRAMEWORK CoreML)
|
|
||||||
|
|
||||||
if (COREML_FRAMEWORK)
|
|
||||||
message(STATUS "CoreML framework found")
|
|
||||||
|
|
||||||
set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DWHISPER_USE_COREML)
|
|
||||||
else()
|
|
||||||
message(FATAL_ERROR "CoreML framework not found")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (WHISPER_COREML_ALLOW_FALLBACK)
|
|
||||||
set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DWHISPER_COREML_ALLOW_FALLBACK)
|
|
||||||
endif()
|
|
||||||
endif()
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (WHISPER_OPENBLAS)
|
|
||||||
set(WHISPER_BLAS_VENDOR "OpenBLAS")
|
|
||||||
set(WHISPER_BLAS ON)
|
|
||||||
# BLA_PKGCONFIG_BLAS is supported since CMake 3.25.
|
|
||||||
# FindBLAS.cmake pkg-config logic seems incomplete, because when
|
|
||||||
# BLA_SIZEOF_INTEGER is 8, then it should search for blas64 instead of blas.
|
|
||||||
# blas.pc/blas64.pc are not always provided, so let's be more specific
|
|
||||||
# and go with openblas.pc/openblas64.pc if WHISPER_OPENBLAS is on.
|
|
||||||
if (WHISPER_OPENBLAS_INTERFACE64)
|
|
||||||
set(WHISPER_BLAS_LIB "openblas64")
|
|
||||||
else ()
|
|
||||||
set(WHISPER_BLAS_LIB "openblas")
|
|
||||||
endif ()
|
|
||||||
set(BLA_PKGCONFIG_BLAS ${WHISPER_BLAS_LIB})
|
|
||||||
# OpenBLAS prebuilt libraries for Windows do not have "64" suffix in filename.
|
|
||||||
# (But .pc file has "64" suffix in filename for USE_64BITINT=1 Windows build.)
|
|
||||||
if (MSVC)
|
|
||||||
set(WHISPER_BLAS_LIB "openblas")
|
|
||||||
endif ()
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (WHISPER_BLAS)
|
|
||||||
if (NOT "$ENV{OPENBLAS_PATH}" STREQUAL "")
|
|
||||||
if (WHISPER_STATIC)
|
|
||||||
set(WHISPER_BLAS_LIB_PREFIX ${CMAKE_STATIC_LIBRARY_PREFIX})
|
|
||||||
set(WHISPER_BLAS_LIB_SUFFIX ${CMAKE_STATIC_LIBRARY_SUFFIX})
|
|
||||||
else ()
|
|
||||||
if (CMAKE_IMPORT_LIBRARY_SUFFIX)
|
|
||||||
set(WHISPER_BLAS_LIB_PREFIX ${CMAKE_IMPORT_LIBRARY_PREFIX})
|
|
||||||
set(WHISPER_BLAS_LIB_SUFFIX ${CMAKE_IMPORT_LIBRARY_SUFFIX})
|
|
||||||
else ()
|
|
||||||
set(WHISPER_BLAS_LIB_PREFIX ${CMAKE_SHARED_LIBRARY_PREFIX})
|
|
||||||
set(WHISPER_BLAS_LIB_SUFFIX ${CMAKE_SHARED_LIBRARY_SUFFIX})
|
|
||||||
endif ()
|
|
||||||
endif ()
|
|
||||||
# OpenBLAS prebuilt libraries hardcode "lib" prefix in filename even on Windows
|
|
||||||
if (WHISPER_OPENBLAS)
|
|
||||||
set(WHISPER_BLAS_LIB_PREFIX "lib")
|
|
||||||
endif ()
|
|
||||||
message(STATUS "BLAS compatible library path provided")
|
|
||||||
set(BLAS_LIBRARIES "$ENV{OPENBLAS_PATH}/lib/${WHISPER_BLAS_LIB_PREFIX}${WHISPER_BLAS_LIB}${WHISPER_BLAS_LIB_SUFFIX}")
|
|
||||||
message(STATUS "Libraries ${BLAS_LIBRARIES}")
|
|
||||||
set(BLAS_INCLUDE_DIRS "$ENV{OPENBLAS_PATH}/include")
|
|
||||||
message(STATUS "Include dirs ${BLAS_INCLUDE_DIRS}")
|
|
||||||
if (NOT EXISTS "${BLAS_LIBRARIES}")
|
|
||||||
message(FATAL_ERROR "BLAS library was not found. Environment variable OPENBLAS_PATH misdefined.")
|
|
||||||
endif ()
|
|
||||||
set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_USE_OPENBLAS)
|
|
||||||
include_directories(${BLAS_INCLUDE_DIRS})
|
|
||||||
set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} ${BLAS_LIBRARIES})
|
|
||||||
else ()
|
|
||||||
if (WHISPER_STATIC)
|
|
||||||
# FindBLAS.cmake pkg-config logic seems incomplete, because when
|
|
||||||
# BLA_STATIC is on, then it should use pkg_check_modules_static
|
|
||||||
# instead of pkg_check_modules.
|
|
||||||
# Some manual variable overriding may be necessary if you don't
|
|
||||||
# achieve desired results.
|
|
||||||
set(BLA_STATIC 1)
|
|
||||||
endif ()
|
|
||||||
set(BLA_VENDOR ${WHISPER_BLAS_VENDOR})
|
|
||||||
if (WHISPER_OPENBLAS_INTERFACE64)
|
|
||||||
set(BLA_SIZEOF_INTEGER 8)
|
|
||||||
else ()
|
|
||||||
set(BLA_SIZEOF_INTEGER 4)
|
|
||||||
endif()
|
|
||||||
set(BLA_PREFER_PKGCONFIG 1)
|
|
||||||
find_package(BLAS)
|
|
||||||
|
|
||||||
if(BLAS_FOUND)
|
|
||||||
message(STATUS "BLAS compatible library found")
|
|
||||||
message(STATUS "Libraries ${BLAS_LIBRARIES}")
|
|
||||||
if (NOT DEFINED BLAS_INCLUDE_DIRS)
|
|
||||||
if (PKGC_BLAS_FOUND)
|
|
||||||
set(BLAS_INCLUDE_DIRS "${PKGC_BLAS_INCLUDE_DIRS}")
|
|
||||||
else ()
|
|
||||||
find_path(BLAS_INCLUDE_DIRS cblas.h /usr/include/openblas)
|
|
||||||
endif()
|
|
||||||
endif()
|
|
||||||
message(STATUS "Include dirs ${BLAS_INCLUDE_DIRS}")
|
|
||||||
set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_USE_OPENBLAS)
|
|
||||||
include_directories(${BLAS_INCLUDE_DIRS})
|
|
||||||
set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} ${BLAS_LIBRARIES})
|
|
||||||
else()
|
|
||||||
message(FATAL_ERROR "BLAS library was not found")
|
|
||||||
endif()
|
|
||||||
endif ()
|
|
||||||
endif ()
|
|
||||||
|
|
||||||
if (WHISPER_MKL)
|
|
||||||
find_package(MKL CONFIG REQUIRED PATHS $ENV{MKLROOT})
|
|
||||||
message(STATUS "Imported oneMKL targets: ${MKL_IMPORTED_TARGETS}")
|
|
||||||
set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_USE_OPENBLAS)
|
|
||||||
set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_BLAS_USE_MKL)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (WHISPER_CUBLAS)
|
|
||||||
message(WARNING "WHISPER_CUBLAS is deprecated and will be removed in the future.\nUse WHISPER_CUDA instead")
|
|
||||||
set(WHISPER_CUDA ON)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (WHISPER_CUDA)
|
|
||||||
cmake_minimum_required(VERSION 3.17)
|
|
||||||
|
|
||||||
find_package(CUDAToolkit)
|
|
||||||
|
|
||||||
if (CUDAToolkit_FOUND)
|
|
||||||
message(STATUS "cuBLAS found")
|
|
||||||
|
|
||||||
enable_language(CUDA)
|
|
||||||
|
|
||||||
file(GLOB GGML_SOURCES_CUDA "ggml-cuda/*.cu")
|
|
||||||
list(APPEND GGML_SOURCES_CUDA ggml-cuda.h)
|
|
||||||
list(APPEND GGML_SOURCES_CUDA ggml-cuda.cu)
|
|
||||||
|
|
||||||
add_compile_definitions(GGML_USE_CUDA)
|
|
||||||
|
|
||||||
if (WHISPER_STATIC)
|
|
||||||
if (WIN32)
|
|
||||||
# As of 12.3.1 CUDA Tookit for Windows does not offer a static cublas library
|
|
||||||
set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas CUDA::cublasLt CUDA::cufft)
|
|
||||||
else ()
|
|
||||||
set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static CUDA::cufft_static)
|
|
||||||
endif()
|
|
||||||
else()
|
|
||||||
set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt CUDA::cufft)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} CUDA::cuda_driver)
|
|
||||||
else()
|
|
||||||
message(FATAL_ERROR "cuBLAS not found")
|
|
||||||
endif()
|
|
||||||
endif()
|
|
||||||
|
|
||||||
|
|
||||||
if (WHISPER_HIPBLAS)
|
|
||||||
list(APPEND CMAKE_PREFIX_PATH /opt/rocm)
|
|
||||||
if (NOT ${CMAKE_C_COMPILER_ID} MATCHES "Clang")
|
|
||||||
message(WARNING "Only LLVM is supported for HIP, hint: CC=/opt/rocm/llvm/bin/clang")
|
|
||||||
endif()
|
|
||||||
if (NOT ${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")
|
|
||||||
message(WARNING "Only LLVM is supported for HIP, hint: CXX=/opt/rocm/llvm/bin/clang++")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
find_package(hip)
|
|
||||||
find_package(hipblas)
|
|
||||||
find_package(rocblas)
|
|
||||||
|
|
||||||
if (${hipblas_FOUND} AND ${hip_FOUND})
|
|
||||||
message(STATUS "HIP and hipBLAS found")
|
|
||||||
set(GGML_HEADERS_ROCM "ggml-cuda.h")
|
|
||||||
|
|
||||||
file(GLOB GGML_SOURCES_ROCM "ggml-cuda/*.cu")
|
|
||||||
list(APPEND GGML_SOURCES_ROCM "ggml-cuda.cu")
|
|
||||||
|
|
||||||
add_compile_definitions(GGML_USE_HIPBLAS GGML_USE_CUDA)
|
|
||||||
|
|
||||||
set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE CXX)
|
|
||||||
if (WHISPER_STATIC)
|
|
||||||
message(FATAL_ERROR "Static linking not supported for HIP/ROCm")
|
|
||||||
endif()
|
|
||||||
set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} hip::device PUBLIC hip::host roc::rocblas roc::hipblas)
|
|
||||||
else()
|
|
||||||
message(FATAL_ERROR "hipBLAS or HIP not found. Try setting CMAKE_PREFIX_PATH=/opt/rocm")
|
|
||||||
endif()
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (WHISPER_CLBLAST)
|
|
||||||
find_package(CLBlast)
|
|
||||||
if (CLBlast_FOUND)
|
|
||||||
message(STATUS "CLBlast found")
|
|
||||||
|
|
||||||
set(GGML_SOURCES_OPENCL ggml-opencl.cpp ggml-opencl.h)
|
|
||||||
|
|
||||||
add_compile_definitions(GGML_USE_CLBLAST)
|
|
||||||
|
|
||||||
set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} clblast)
|
|
||||||
else()
|
|
||||||
message(FATAL_ERROR "CLBlast not found")
|
|
||||||
endif()
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if( WHISPER_OPENVINO )
|
|
||||||
find_package(OpenVINO REQUIRED COMPONENTS Runtime)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (WHISPER_SYCL)
|
|
||||||
if ( NOT DEFINED ENV{ONEAPI_ROOT})
|
|
||||||
message(FATAL_ERROR "Not detect ENV {ONEAPI_ROOT}, please install oneAPI & source it, like: source /opt/intel/oneapi/setvars.sh")
|
|
||||||
endif()
|
|
||||||
#todo: AOT
|
|
||||||
|
|
||||||
find_package(IntelSYCL REQUIRED)
|
|
||||||
if (WHISPER_SYCL_F16)
|
|
||||||
add_compile_definitions(GGML_SYCL_F16)
|
|
||||||
endif()
|
|
||||||
add_compile_definitions(GGML_USE_SYCL)
|
|
||||||
|
|
||||||
add_compile_options(-I./) #include DPCT
|
|
||||||
add_compile_options(-I/${SYCL_INCLUDE_DIR})
|
|
||||||
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing")
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3")
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsycl -L${MKLROOT}/lib")
|
|
||||||
|
|
||||||
set(GGML_HEADERS_SYCL ggml-sycl.h)
|
|
||||||
set(GGML_SOURCES_SYCL ggml-sycl.cpp)
|
|
||||||
|
|
||||||
set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS} sycl OpenCL mkl_core pthread m dl mkl_sycl_blas mkl_intel_ilp64 mkl_tbb_thread)
|
|
||||||
endif()
|
|
||||||
# compiler flags
|
|
||||||
|
|
||||||
if (NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
|
|
||||||
set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
|
|
||||||
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "RelWithDebInfo")
|
|
||||||
endif ()
|
|
||||||
|
|
||||||
if (WHISPER_ALL_WARNINGS)
|
|
||||||
if (NOT MSVC)
|
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} \
|
|
||||||
-Wall \
|
|
||||||
-Wextra \
|
|
||||||
-Wpedantic \
|
|
||||||
-Wshadow \
|
|
||||||
-Wcast-qual \
|
|
||||||
-Wstrict-prototypes \
|
|
||||||
-Wpointer-arith \
|
|
||||||
-Wno-unused-function \
|
|
||||||
")
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} \
|
|
||||||
-Wall \
|
|
||||||
-Wextra \
|
|
||||||
-Wpedantic \
|
|
||||||
-Wcast-qual \
|
|
||||||
")
|
|
||||||
else()
|
|
||||||
# todo : msvc
|
|
||||||
endif()
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (NOT MSVC)
|
|
||||||
# TODO: temporary disabled until we figure out ggml-metal.m
|
|
||||||
#set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror=vla")
|
|
||||||
#set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-math-errno -ffinite-math-only -funsafe-math-optimizations")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
message(STATUS "CMAKE_SYSTEM_PROCESSOR: ${CMAKE_SYSTEM_PROCESSOR}")
|
|
||||||
|
|
||||||
if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
|
|
||||||
message(STATUS "ARM detected")
|
|
||||||
elseif(${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64le")
|
|
||||||
message(STATUS "PowerPC detected")
|
|
||||||
else()
|
|
||||||
message(STATUS "x86 detected")
|
|
||||||
if (MSVC)
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /utf-8")
|
|
||||||
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /utf-8")
|
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /utf-8")
|
|
||||||
if(NOT WHISPER_NO_AVX512)
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX512")
|
|
||||||
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /arch:AVX512")
|
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /arch:AVX512")
|
|
||||||
# MSVC has no compile-time flags enabling specific
|
|
||||||
# AVX512 extensions, neither it defines the
|
|
||||||
# macros corresponding to the extensions.
|
|
||||||
# Do it manually.
|
|
||||||
if (NOT WHISPER_NO_AVX512_VBMI)
|
|
||||||
add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VBMI__>)
|
|
||||||
add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VBMI__>)
|
|
||||||
endif()
|
|
||||||
if (NOT WHISPER_NO_AVX512_VNNI)
|
|
||||||
add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VNNI__>)
|
|
||||||
add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VNNI__>)
|
|
||||||
endif()
|
|
||||||
elseif(NOT WHISPER_NO_AVX2)
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX2")
|
|
||||||
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /arch:AVX2")
|
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /arch:AVX2")
|
|
||||||
elseif(NOT WHISPER_NO_AVX)
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX")
|
|
||||||
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /arch:AVX")
|
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /arch:AVX")
|
|
||||||
endif()
|
|
||||||
else()
|
|
||||||
if (EMSCRIPTEN)
|
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pthread -s TOTAL_STACK=5242880")
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread -s TOTAL_STACK=5242880")
|
|
||||||
else()
|
|
||||||
if(NOT WHISPER_NO_AVX)
|
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx")
|
|
||||||
endif()
|
|
||||||
if(NOT WHISPER_NO_AVX2)
|
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx2")
|
|
||||||
endif()
|
|
||||||
if(NOT WHISPER_NO_AVX512)
|
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512f -mavx512cd -mavx512vl -mavx512dq -mavx512bw")
|
|
||||||
if(NOT WHISPER_NO_AVX512_VBMI)
|
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512vbmi")
|
|
||||||
endif()
|
|
||||||
if(NOT WHISPER_NO_AVX512_VNNI)
|
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512vnni")
|
|
||||||
endif()
|
|
||||||
endif()
|
|
||||||
if(NOT WHISPER_NO_FMA)
|
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfma")
|
|
||||||
endif()
|
|
||||||
if(NOT WHISPER_NO_F16C)
|
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mf16c")
|
|
||||||
endif()
|
|
||||||
endif()
|
|
||||||
endif()
|
|
||||||
endif()
|
|
||||||
|
|
||||||
#
|
|
||||||
# POSIX conformance
|
|
||||||
#
|
|
||||||
|
|
||||||
# clock_gettime came in POSIX.1b (1993)
|
|
||||||
# CLOCK_MONOTONIC came in POSIX.1-2001 / SUSv3 as optional
|
|
||||||
# posix_memalign came in POSIX.1-2001 / SUSv3
|
|
||||||
# M_PI is an XSI extension since POSIX.1-2001 / SUSv3, came in XPG1 (1985)
|
|
||||||
add_compile_definitions(_XOPEN_SOURCE=600)
|
|
||||||
|
|
||||||
# Somehow in OpenBSD whenever POSIX conformance is specified
|
|
||||||
# some string functions rely on locale_t availability,
|
|
||||||
# which was introduced in POSIX.1-2008, forcing us to go higher
|
|
||||||
if (CMAKE_SYSTEM_NAME MATCHES "OpenBSD")
|
|
||||||
remove_definitions(-D_XOPEN_SOURCE=600)
|
|
||||||
add_compile_definitions(_XOPEN_SOURCE=700)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
# Data types, macros and functions related to controlling CPU affinity
|
|
||||||
# are available on Linux through GNU extensions in libc
|
|
||||||
if (CMAKE_SYSTEM_NAME MATCHES "Linux")
|
if (CMAKE_SYSTEM_NAME MATCHES "Linux")
|
||||||
add_compile_definitions(_GNU_SOURCE)
|
option(WHISPER_FFMPEG "whisper: support building and linking with ffmpeg libs (avcodec, swresample, ...)" OFF)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
# RLIMIT_MEMLOCK came in BSD, is not specified in POSIX.1,
|
option(WHISPER_COREML "whisper: enable Core ML framework" OFF)
|
||||||
# and on macOS its availability depends on enabling Darwin extensions
|
option(WHISPER_COREML_ALLOW_FALLBACK "whisper: allow non-CoreML fallback" OFF)
|
||||||
# similarly on DragonFly, enabling BSD extensions is necessary
|
option(WHISPER_OPENVINO "whisper: support for OpenVINO" OFF)
|
||||||
if (CMAKE_SYSTEM_NAME MATCHES "Darwin")
|
|
||||||
add_compile_definitions(_DARWIN_C_SOURCE)
|
|
||||||
endif()
|
|
||||||
if (CMAKE_SYSTEM_NAME MATCHES "DragonFly")
|
|
||||||
add_compile_definitions(_DARWIN_C_SOURCE)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
# alloca is a non-standard interface that is not visible on BSDs when
|
# Required for relocatable CMake package
|
||||||
# POSIX conformance is specified, but not all of them provide a clean way
|
include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info.cmake)
|
||||||
# to enable it in such cases
|
|
||||||
if (CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
|
|
||||||
add_compile_definitions(__BSD_VISIBLE)
|
|
||||||
endif()
|
|
||||||
if (CMAKE_SYSTEM_NAME MATCHES "NetBSD")
|
|
||||||
add_compile_definitions(_NETBSD_SOURCE)
|
|
||||||
endif()
|
|
||||||
if (CMAKE_SYSTEM_NAME MATCHES "OpenBSD")
|
|
||||||
add_compile_definitions(_BSD_SOURCE)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (WHISPER_PERF)
|
# override ggml options
|
||||||
set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_PERF)
|
set(GGML_CCACHE ${WHISPER_CCACHE})
|
||||||
endif()
|
set(GGML_SANITIZE_THREAD ${WHISPER_SANITIZE_THREAD})
|
||||||
|
set(GGML_SANITIZE_ADDRESS ${WHISPER_SANITIZE_ADDRESS})
|
||||||
|
set(GGML_SANITIZE_UNDEFINED ${WHISPER_SANITIZE_UNDEFINED})
|
||||||
|
set(GGML_ALL_WARNINGS ${WHISPER_ALL_WARNINGS})
|
||||||
|
set(GGML_FATAL_WARNINGS ${WHISPER_FATAL_WARNINGS})
|
||||||
|
|
||||||
#
|
# transition helpers
|
||||||
# whisper.coreml - Core ML support
|
function (whisper_option_depr TYPE OLD NEW)
|
||||||
#
|
if (${OLD})
|
||||||
|
message(${TYPE} "${OLD} is deprecated and will be removed in the future.\nUse ${NEW} instead\n")
|
||||||
if (WHISPER_COREML)
|
set(${NEW} ON)
|
||||||
set(TARGET whisper.coreml)
|
|
||||||
|
|
||||||
add_library(${TARGET}
|
|
||||||
coreml/whisper-encoder.h
|
|
||||||
coreml/whisper-encoder.mm
|
|
||||||
coreml/whisper-encoder-impl.h
|
|
||||||
coreml/whisper-encoder-impl.m
|
|
||||||
)
|
|
||||||
|
|
||||||
include(DefaultTargetOptions)
|
|
||||||
|
|
||||||
target_include_directories(${TARGET} PUBLIC
|
|
||||||
.
|
|
||||||
)
|
|
||||||
|
|
||||||
target_link_libraries(${TARGET} PRIVATE ${FOUNDATION_FRAMEWORK} ${COREML_FRAMEWORK})
|
|
||||||
|
|
||||||
set_target_properties(${TARGET} PROPERTIES
|
|
||||||
COMPILE_FLAGS "-fobjc-arc"
|
|
||||||
)
|
|
||||||
set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (WHISPER_OPENVINO)
|
|
||||||
set(TARGET whisper.openvino)
|
|
||||||
|
|
||||||
add_library(${TARGET} OBJECT
|
|
||||||
openvino/whisper-openvino-encoder.h
|
|
||||||
openvino/whisper-openvino-encoder.cpp
|
|
||||||
)
|
|
||||||
|
|
||||||
target_include_directories(${TARGET} PUBLIC
|
|
||||||
.
|
|
||||||
)
|
|
||||||
|
|
||||||
set_property(TARGET ${TARGET} PROPERTY POSITION_INDEPENDENT_CODE ON)
|
|
||||||
set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DWHISPER_USE_OPENVINO)
|
|
||||||
|
|
||||||
target_link_libraries(${TARGET} PRIVATE openvino::runtime)
|
|
||||||
set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
#
|
|
||||||
# whisper - this is the main library of the project
|
|
||||||
#
|
|
||||||
|
|
||||||
set(TARGET whisper)
|
|
||||||
|
|
||||||
add_library(${TARGET}
|
|
||||||
ggml.h
|
|
||||||
ggml.c
|
|
||||||
ggml-alloc.h
|
|
||||||
ggml-alloc.c
|
|
||||||
ggml-backend.h
|
|
||||||
ggml-backend.c
|
|
||||||
ggml-quants.h
|
|
||||||
ggml-quants.c
|
|
||||||
${GGML_SOURCES_METAL}
|
|
||||||
${GGML_SOURCES_CUDA}
|
|
||||||
${GGML_SOURCES_OPENCL}
|
|
||||||
${GGML_SOURCES_SYCL} ${GGML_HEADERS_SYCL}
|
|
||||||
${GGML_SOURCES_ROCM} ${GGML_HEADERS_ROCM}
|
|
||||||
whisper.h
|
|
||||||
whisper.cpp
|
|
||||||
)
|
|
||||||
|
|
||||||
if (WHISPER_CUDA)
|
|
||||||
target_sources(${TARGET} PRIVATE whisper-mel-cuda.cu)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
include_directories (
|
|
||||||
.
|
|
||||||
)
|
|
||||||
# Set the version numbers
|
|
||||||
set_target_properties(whisper PROPERTIES
|
|
||||||
VERSION ${PROJECT_VERSION}
|
|
||||||
SOVERSION ${SOVERSION}
|
|
||||||
)
|
|
||||||
|
|
||||||
include(DefaultTargetOptions)
|
|
||||||
|
|
||||||
target_include_directories(${TARGET} PUBLIC
|
|
||||||
.
|
|
||||||
)
|
|
||||||
|
|
||||||
if (WHISPER_COREML)
|
|
||||||
target_link_libraries(${TARGET} PRIVATE whisper.coreml)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (WHISPER_OPENVINO)
|
|
||||||
target_link_libraries(${TARGET} PRIVATE whisper.openvino)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (WHISPER_MKL)
|
|
||||||
target_link_libraries(${TARGET} PUBLIC MKL::MKL)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (MSVC)
|
|
||||||
target_link_libraries(${TARGET} PRIVATE ${WHISPER_EXTRA_LIBS} ${CMAKE_THREAD_LIBS_INIT})
|
|
||||||
|
|
||||||
set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -D_CRT_SECURE_NO_WARNINGS)
|
|
||||||
else()
|
|
||||||
target_link_libraries(${TARGET} PRIVATE m ${WHISPER_EXTRA_LIBS} ${CMAKE_THREAD_LIBS_INIT})
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (BUILD_SHARED_LIBS)
|
|
||||||
set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
|
|
||||||
target_link_libraries(${TARGET} PUBLIC
|
|
||||||
${CMAKE_DL_LIBS}
|
|
||||||
)
|
|
||||||
|
|
||||||
target_compile_definitions(${TARGET} PUBLIC
|
|
||||||
WHISPER_SHARED
|
|
||||||
GGML_SHARED
|
|
||||||
)
|
|
||||||
|
|
||||||
target_compile_definitions(${TARGET} PRIVATE
|
|
||||||
WHISPER_BUILD
|
|
||||||
GGML_BUILD
|
|
||||||
)
|
|
||||||
|
|
||||||
if (WHISPER_METAL)
|
|
||||||
# TODO: I think this should make ggml-metal.m "see" the ggml-metal.metal file from the "bin" directory
|
|
||||||
# but for some reason it does not work here like it does in llama.cpp
|
|
||||||
set_target_properties(${TARGET} PROPERTIES RESOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal")
|
|
||||||
endif()
|
endif()
|
||||||
|
endfunction()
|
||||||
|
|
||||||
|
whisper_option_depr(FATAL_ERROR WHISPER_CUBLAS GGML_CUDA)
|
||||||
|
whisper_option_depr(WARNING WHISPER_CUDA GGML_CUDA)
|
||||||
|
whisper_option_depr(WARNING WHISPER_KOMPUTE GGML_KOMPUTE)
|
||||||
|
whisper_option_depr(WARNING WHISPER_METAL GGML_METAL)
|
||||||
|
whisper_option_depr(WARNING WHISPER_METAL_EMBED_LIBRARY GGML_METAL_EMBED_LIBRARY)
|
||||||
|
whisper_option_depr(WARNING WHISPER_NATIVE GGML_NATIVE)
|
||||||
|
whisper_option_depr(WARNING WHISPER_OPENMP GGML_OPENMP)
|
||||||
|
whisper_option_depr(WARNING WHISPER_RPC GGML_RPC)
|
||||||
|
whisper_option_depr(WARNING WHISPER_SYCL GGML_SYCL)
|
||||||
|
whisper_option_depr(WARNING WHISPER_SYCL_F16 GGML_SYCL_F16)
|
||||||
|
|
||||||
|
#
|
||||||
|
# build the library
|
||||||
|
#
|
||||||
|
|
||||||
|
if (NOT TARGET ggml)
|
||||||
|
add_subdirectory(ggml)
|
||||||
|
# ... otherwise assume ggml is added by a parent CMakeLists.txt
|
||||||
endif()
|
endif()
|
||||||
|
add_subdirectory(src)
|
||||||
|
|
||||||
if (GGML_SOURCES_CUDA)
|
#
|
||||||
message(STATUS "GGML CUDA sources found, configuring CUDA architecture")
|
# install
|
||||||
# Only configure gmml CUDA architectures is not globally set
|
#
|
||||||
if (NOT DEFINED GGML_CUDA_ARCHITECTURES)
|
|
||||||
# Not overriden by user, so set defaults
|
|
||||||
set(GGML_CUDA_ARCHITECTURES 52 61 70)
|
|
||||||
endif()
|
|
||||||
message(STATUS "GGML Configuring CUDA architectures ${GGML_CUDA_ARCHITECTURES}")
|
|
||||||
set_property(TARGET whisper PROPERTY CUDA_ARCHITECTURES ${GGML_CUDA_ARCHITECTURES})
|
|
||||||
set_property(TARGET whisper PROPERTY CUDA_SELECT_NVCC_ARCH_FLAGS "Auto")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if (EMSCRIPTEN)
|
|
||||||
set_target_properties(${TARGET} PROPERTIES COMPILE_FLAGS "-msimd128")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
target_compile_definitions(${TARGET} PUBLIC
|
|
||||||
${WHISPER_EXTRA_FLAGS}
|
|
||||||
)
|
|
||||||
|
|
||||||
set_target_properties(${TARGET} PROPERTIES PUBLIC_HEADER "ggml.h;whisper.h")
|
|
||||||
set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
|
|
||||||
|
|
||||||
include(GNUInstallDirs)
|
include(GNUInstallDirs)
|
||||||
|
include(CMakePackageConfigHelpers)
|
||||||
|
|
||||||
install(TARGETS ${TARGET}
|
set(WHISPER_BUILD_NUMBER ${BUILD_NUMBER})
|
||||||
LIBRARY DESTINATION lib
|
set(WHISPER_BUILD_COMMIT ${BUILD_COMMIT})
|
||||||
ARCHIVE DESTINATION lib/static
|
set(WHISPER_INSTALL_VERSION ${CMAKE_PROJECT_VERSION})
|
||||||
RUNTIME DESTINATION bin
|
|
||||||
RESOURCE DESTINATION bin
|
|
||||||
PUBLIC_HEADER DESTINATION include
|
|
||||||
)
|
|
||||||
|
|
||||||
#
|
set(WHISPER_INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE PATH "Location of header files")
|
||||||
# bindings
|
set(WHISPER_LIB_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR} CACHE PATH "Location of library files")
|
||||||
#
|
set(WHISPER_BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR} CACHE PATH "Location of binary files")
|
||||||
|
|
||||||
add_subdirectory(bindings)
|
get_directory_property(WHISPER_TRANSIENT_DEFINES COMPILE_DEFINITIONS)
|
||||||
|
|
||||||
|
set_target_properties(whisper PROPERTIES PUBLIC_HEADER ${CMAKE_CURRENT_SOURCE_DIR}/include/whisper.h)
|
||||||
|
install(TARGETS whisper LIBRARY PUBLIC_HEADER)
|
||||||
|
|
||||||
|
configure_package_config_file(
|
||||||
|
${CMAKE_CURRENT_SOURCE_DIR}/cmake/whisper-config.cmake.in
|
||||||
|
${CMAKE_CURRENT_BINARY_DIR}/whisper-config.cmake
|
||||||
|
INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/whisper
|
||||||
|
PATH_VARS
|
||||||
|
WHISPER_INCLUDE_INSTALL_DIR
|
||||||
|
WHISPER_LIB_INSTALL_DIR
|
||||||
|
WHISPER_BIN_INSTALL_DIR )
|
||||||
|
|
||||||
|
write_basic_package_version_file(
|
||||||
|
${CMAKE_CURRENT_BINARY_DIR}/whisper-version.cmake
|
||||||
|
VERSION ${WHISPER_INSTALL_VERSION}
|
||||||
|
COMPATIBILITY SameMajorVersion)
|
||||||
|
|
||||||
|
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/whisper-config.cmake
|
||||||
|
${CMAKE_CURRENT_BINARY_DIR}/whisper-version.cmake
|
||||||
|
DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/whisper)
|
||||||
|
|
||||||
|
configure_file(cmake/whisper.pc.in
|
||||||
|
"${CMAKE_CURRENT_BINARY_DIR}/whisper.pc"
|
||||||
|
@ONLY)
|
||||||
|
|
||||||
|
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/whisper.pc"
|
||||||
|
DESTINATION lib/pkgconfig)
|
||||||
|
|
||||||
#
|
#
|
||||||
# programs, examples and tests
|
# programs, examples and tests
|
||||||
#
|
#
|
||||||
|
|
||||||
if (WHISPER_BUILD_TESTS AND NOT CMAKE_JS_VERSION)
|
if (WHISPER_BUILD_TESTS AND NOT CMAKE_JS_VERSION)
|
||||||
enable_testing()
|
#include(CTest)
|
||||||
add_subdirectory(tests)
|
#add_subdirectory(tests)
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if (WHISPER_BUILD_EXAMPLES)
|
if (WHISPER_BUILD_EXAMPLES)
|
||||||
|
517
Makefile
517
Makefile
@ -1,504 +1,12 @@
|
|||||||
default: main bench quantize server
|
|
||||||
|
|
||||||
ifndef UNAME_S
|
|
||||||
UNAME_S := $(shell uname -s)
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifndef UNAME_P
|
|
||||||
UNAME_P := $(shell uname -p)
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifndef UNAME_M
|
|
||||||
UNAME_M := $(shell uname -m)
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifndef NVCC_VERSION
|
|
||||||
ifeq ($(call,$(shell which nvcc))$(.SHELLSTATUS),0)
|
|
||||||
NVCC_VERSION := $(shell nvcc --version | egrep -o "V[0-9]+.[0-9]+.[0-9]+" | cut -c2-)
|
|
||||||
endif
|
|
||||||
endif
|
|
||||||
|
|
||||||
# In GNU make default CXX is g++ instead of c++. Let's fix that so that users
|
|
||||||
# of non-gcc compilers don't have to provide g++ alias or wrapper.
|
|
||||||
DEFCC := cc
|
|
||||||
DEFCXX := c++
|
|
||||||
ifeq ($(origin CC),default)
|
|
||||||
CC := $(DEFCC)
|
|
||||||
endif
|
|
||||||
ifeq ($(origin CXX),default)
|
|
||||||
CXX := $(DEFCXX)
|
|
||||||
endif
|
|
||||||
|
|
||||||
CCV := $(shell $(CC) --version | head -n 1)
|
|
||||||
CXXV := $(shell $(CXX) --version | head -n 1)
|
|
||||||
|
|
||||||
# Mac OS + Arm can report x86_64
|
|
||||||
# ref: https://github.com/ggerganov/whisper.cpp/issues/66#issuecomment-1282546789
|
|
||||||
ifeq ($(UNAME_S),Darwin)
|
|
||||||
ifneq ($(UNAME_P),arm)
|
|
||||||
SYSCTL_M := $(shell sysctl -n hw.optional.arm64)
|
|
||||||
ifeq ($(SYSCTL_M),1)
|
|
||||||
# UNAME_P := arm
|
|
||||||
# UNAME_M := arm64
|
|
||||||
warn := $(warning Your arch is announced as x86_64, but it seems to actually be ARM64. Not fixing that can lead to bad performance. For more info see: https://github.com/ggerganov/whisper.cpp/issues/66\#issuecomment-1282546789)
|
|
||||||
endif
|
|
||||||
endif
|
|
||||||
endif
|
|
||||||
|
|
||||||
#
|
|
||||||
# Compile flags
|
|
||||||
#
|
|
||||||
|
|
||||||
CFLAGS = -I. -O3 -DNDEBUG -std=c11 -fPIC
|
|
||||||
CXXFLAGS = -I. -I./examples -O3 -DNDEBUG -std=c++11 -fPIC
|
|
||||||
LDFLAGS =
|
|
||||||
|
|
||||||
ifdef MACOSX_DEPLOYMENT_TARGET
|
|
||||||
CFLAGS += -mmacosx-version-min=$(MACOSX_DEPLOYMENT_TARGET)
|
|
||||||
CXXFLAGS += -mmacosx-version-min=$(MACOSX_DEPLOYMENT_TARGET)
|
|
||||||
LDFLAGS += -mmacosx-version-min=$(MACOSX_DEPLOYMENT_TARGET)
|
|
||||||
endif
|
|
||||||
|
|
||||||
# clock_gettime came in POSIX.1b (1993)
|
|
||||||
# CLOCK_MONOTONIC came in POSIX.1-2001 / SUSv3 as optional
|
|
||||||
# posix_memalign came in POSIX.1-2001 / SUSv3
|
|
||||||
# M_PI is an XSI extension since POSIX.1-2001 / SUSv3, came in XPG1 (1985)
|
|
||||||
CFLAGS += -D_XOPEN_SOURCE=600
|
|
||||||
CXXFLAGS += -D_XOPEN_SOURCE=600
|
|
||||||
|
|
||||||
# Somehow in OpenBSD whenever POSIX conformance is specified
|
|
||||||
# some string functions rely on locale_t availability,
|
|
||||||
# which was introduced in POSIX.1-2008, forcing us to go higher
|
|
||||||
ifeq ($(UNAME_S),OpenBSD)
|
|
||||||
CFLAGS += -U_XOPEN_SOURCE -D_XOPEN_SOURCE=700
|
|
||||||
CXXFLAGS += -U_XOPEN_SOURCE -D_XOPEN_SOURCE=700
|
|
||||||
endif
|
|
||||||
|
|
||||||
# Data types, macros and functions related to controlling CPU affinity
|
|
||||||
# are available on Linux through GNU extensions in libc
|
|
||||||
ifeq ($(UNAME_S),Linux)
|
|
||||||
CFLAGS += -D_GNU_SOURCE
|
|
||||||
CXXFLAGS += -D_GNU_SOURCE
|
|
||||||
endif
|
|
||||||
|
|
||||||
# RLIMIT_MEMLOCK came in BSD, is not specified in POSIX.1,
|
|
||||||
# and on macOS its availability depends on enabling Darwin extensions
|
|
||||||
# similarly on DragonFly, enabling BSD extensions is necessary
|
|
||||||
ifeq ($(UNAME_S),Darwin)
|
|
||||||
CFLAGS += -D_DARWIN_C_SOURCE
|
|
||||||
CXXFLAGS += -D_DARWIN_C_SOURCE
|
|
||||||
endif
|
|
||||||
ifeq ($(UNAME_S),DragonFly)
|
|
||||||
CFLAGS += -D__BSD_VISIBLE
|
|
||||||
CXXFLAGS += -D__BSD_VISIBLE
|
|
||||||
endif
|
|
||||||
|
|
||||||
# alloca is a non-standard interface that is not visible on BSDs when
|
|
||||||
# POSIX conformance is specified, but not all of them provide a clean way
|
|
||||||
# to enable it in such cases
|
|
||||||
ifeq ($(UNAME_S),FreeBSD)
|
|
||||||
CFLAGS += -D__BSD_VISIBLE
|
|
||||||
CXXFLAGS += -D__BSD_VISIBLE
|
|
||||||
endif
|
|
||||||
ifeq ($(UNAME_S),NetBSD)
|
|
||||||
CFLAGS += -D_NETBSD_SOURCE
|
|
||||||
CXXFLAGS += -D_NETBSD_SOURCE
|
|
||||||
endif
|
|
||||||
ifeq ($(UNAME_S),OpenBSD)
|
|
||||||
CFLAGS += -D_BSD_SOURCE
|
|
||||||
CXXFLAGS += -D_BSD_SOURCE
|
|
||||||
endif
|
|
||||||
|
|
||||||
# OS specific
|
|
||||||
# TODO: support Windows
|
|
||||||
ifeq ($(filter $(UNAME_S),Linux Darwin DragonFly FreeBSD NetBSD OpenBSD Haiku),$(UNAME_S))
|
|
||||||
CFLAGS += -pthread
|
|
||||||
CXXFLAGS += -pthread
|
|
||||||
endif
|
|
||||||
|
|
||||||
# detect Windows
|
|
||||||
ifneq ($(findstring _NT,$(UNAME_S)),)
|
|
||||||
_WIN32 := 1
|
|
||||||
endif
|
|
||||||
|
|
||||||
# Windows Sockets 2 (Winsock) for network-capable apps
|
|
||||||
ifeq ($(_WIN32),1)
|
|
||||||
LWINSOCK2 := -lws2_32
|
|
||||||
endif
|
|
||||||
|
|
||||||
# Architecture specific
|
|
||||||
# TODO: probably these flags need to be tweaked on some architectures
|
|
||||||
# feel free to update the Makefile for your architecture and send a pull request or issue
|
|
||||||
ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686 amd64))
|
|
||||||
ifeq ($(UNAME_S),Darwin)
|
|
||||||
CPUINFO_CMD := sysctl machdep.cpu.features machdep.cpu.leaf7_features
|
|
||||||
else ifeq ($(UNAME_S),Linux)
|
|
||||||
CPUINFO_CMD := cat /proc/cpuinfo
|
|
||||||
else ifneq (,$(filter MINGW32_NT% MINGW64_NT% MSYS_NT%,$(UNAME_S)))
|
|
||||||
CPUINFO_CMD := cat /proc/cpuinfo
|
|
||||||
else ifneq (,$(filter DragonFly FreeBSD,$(UNAME_S)))
|
|
||||||
CPUINFO_CMD := grep Features /var/run/dmesg.boot
|
|
||||||
else ifeq ($(UNAME_S),Haiku)
|
|
||||||
CPUINFO_CMD := sysinfo -cpu
|
|
||||||
endif
|
|
||||||
|
|
||||||
# x86 ISA extensions (chronological order)
|
|
||||||
ifdef CPUINFO_CMD
|
|
||||||
SSE3_M := $(shell $(CPUINFO_CMD) | grep -iwE 'PNI|SSE3')
|
|
||||||
SSSE3_M := $(shell $(CPUINFO_CMD) | grep -iw 'SSSE3')
|
|
||||||
AVX_M := $(shell $(CPUINFO_CMD) | grep -iwE 'AVX|AVX1.0')
|
|
||||||
F16C_M := $(shell $(CPUINFO_CMD) | grep -iw 'F16C')
|
|
||||||
FMA_M := $(shell $(CPUINFO_CMD) | grep -iw 'FMA')
|
|
||||||
AVX2_M := $(shell $(CPUINFO_CMD) | grep -iw 'AVX2')
|
|
||||||
AVX512F_M := $(shell $(CPUINFO_CMD) | grep -iw 'AVX512F')
|
|
||||||
AVX512VBMI_M := $(shell $(CPUINFO_CMD) | grep -iw 'AVX512VBMI')
|
|
||||||
AVX512VNNI_M := $(shell $(CPUINFO_CMD) | grep -iwE 'AVX512_VNNI|AVX512VNNI')
|
|
||||||
|
|
||||||
# AVX-512 has many subsets, so let's make it easy to disable them all
|
|
||||||
ifneq ($(filter-out 0,$(WHISPER_NO_AVX512)),)
|
|
||||||
AVX512F_M :=
|
|
||||||
AVX512VBMI_M :=
|
|
||||||
AVX512VNNI_M :=
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifneq (,$(SSE3_M))
|
|
||||||
CFLAGS += -msse3
|
|
||||||
CXXFLAGS += -msse3
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifneq (,$(SSSE3_M))
|
|
||||||
CFLAGS += -mssse3
|
|
||||||
CXXFLAGS += -mssse3
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifneq (,$(AVX_M))
|
|
||||||
CFLAGS += -mavx
|
|
||||||
CXXFLAGS += -mavx
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifneq (,$(F16C_M))
|
|
||||||
CFLAGS += -mf16c
|
|
||||||
CXXFLAGS += -mf16c
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifneq (,$(FMA_M))
|
|
||||||
CFLAGS += -mfma
|
|
||||||
CXXFLAGS += -mfma
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifneq (,$(AVX2_M))
|
|
||||||
CFLAGS += -mavx2
|
|
||||||
CXXFLAGS += -mavx2
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifneq (,$(AVX512F_M))
|
|
||||||
CFLAGS += -mavx512f -mavx512cd -mavx512vl -mavx512dq -mavx512bw
|
|
||||||
CXXFLAGS += -mavx512f -mavx512cd -mavx512vl -mavx512dq -mavx512bw
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifneq (,$(AVX512VBMI_M))
|
|
||||||
CFLAGS += -mavx512vbmi
|
|
||||||
CXXFLAGS += -mavx512vbmi
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifneq (,$(AVX512VNNI_M))
|
|
||||||
CFLAGS += -mavx512vnni
|
|
||||||
CXXFLAGS += -mavx512vnni
|
|
||||||
endif
|
|
||||||
endif
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifneq ($(filter ppc64%,$(UNAME_M)),)
|
|
||||||
POWER9_M := $(shell grep "POWER9" /proc/cpuinfo)
|
|
||||||
ifneq (,$(findstring POWER9,$(POWER9_M)))
|
|
||||||
CFLAGS += -mpower9-vector
|
|
||||||
endif
|
|
||||||
# Require c++23's std::byteswap for big-endian support.
|
|
||||||
ifeq ($(UNAME_M),ppc64)
|
|
||||||
CXXFLAGS += -std=c++23 -DGGML_BIG_ENDIAN
|
|
||||||
endif
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifndef WHISPER_NO_ACCELERATE
|
|
||||||
# Mac M1 - include Accelerate framework
|
|
||||||
ifeq ($(UNAME_S),Darwin)
|
|
||||||
CFLAGS += -DGGML_USE_ACCELERATE
|
|
||||||
CFLAGS += -DACCELERATE_NEW_LAPACK
|
|
||||||
CFLAGS += -DACCELERATE_LAPACK_ILP64
|
|
||||||
LDFLAGS += -framework Accelerate
|
|
||||||
endif
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifdef WHISPER_COREML
|
|
||||||
CXXFLAGS += -DWHISPER_USE_COREML
|
|
||||||
LDFLAGS += -framework Foundation -framework CoreML
|
|
||||||
|
|
||||||
ifdef WHISPER_COREML_ALLOW_FALLBACK
|
|
||||||
CXXFLAGS += -DWHISPER_COREML_ALLOW_FALLBACK
|
|
||||||
endif
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifndef WHISPER_NO_METAL
|
|
||||||
ifeq ($(UNAME_S),Darwin)
|
|
||||||
WHISPER_METAL := 1
|
|
||||||
|
|
||||||
CFLAGS += -DGGML_USE_METAL
|
|
||||||
CXXFLAGS += -DGGML_USE_METAL
|
|
||||||
LDFLAGS += -framework Foundation -framework Metal -framework MetalKit
|
|
||||||
endif
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifneq ($(filter-out 0,$(WHISPER_OPENBLAS)),) # OpenBLAS
|
|
||||||
WHISPER_OPENBLAS_INTERFACE64 ?= 0 # use 32-bit interface by default
|
|
||||||
ifneq ($(filter-out 0,$(WHISPER_OPENBLAS_INTERFACE64)),)
|
|
||||||
WHISPER_BLAS_LIB := openblas64
|
|
||||||
else
|
|
||||||
WHISPER_BLAS_LIB := openblas
|
|
||||||
endif
|
|
||||||
ifneq ($(OPENBLAS_PATH),)
|
|
||||||
WHISPER_BLAS_CFLAGS := -I$(OPENBLAS_PATH)/include
|
|
||||||
WHISPER_BLAS_LDFLAGS := -L$(OPENBLAS_PATH)/lib -l$(WHISPER_BLAS_LIB)
|
|
||||||
else
|
|
||||||
WHISPER_BLAS_LIB_PC_EXISTS := $(shell pkg-config --exists $(WHISPER_BLAS_LIB) && echo 1)
|
|
||||||
ifneq ($(filter-out 0,$(WHISPER_BLAS_LIB_PC_EXISTS)),)
|
|
||||||
WHISPER_BLAS_CFLAGS := $(shell pkg-config --cflags $(WHISPER_BLAS_LIB))
|
|
||||||
WHISPER_BLAS_LDFLAGS := $(shell pkg-config --libs $(WHISPER_BLAS_LIB))
|
|
||||||
else
|
|
||||||
WHISPER_BLAS_CFLAGS := -I/usr/include/openblas
|
|
||||||
WHISPER_BLAS_LDFLAGS := -l$(WHISPER_BLAS_LIB)
|
|
||||||
endif
|
|
||||||
endif
|
|
||||||
CFLAGS += $(WHISPER_BLAS_CFLAGS) -DGGML_USE_OPENBLAS
|
|
||||||
LDFLAGS += $(WHISPER_BLAS_LDFLAGS)
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifdef WHISPER_CUBLAS
|
|
||||||
# WHISPER_CUBLAS is deprecated and will be removed in the future
|
|
||||||
WHISPER_CUDA := 1
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifdef WHISPER_CUDA
|
|
||||||
ifeq ($(shell expr $(NVCC_VERSION) \>= 11.6), 1)
|
|
||||||
CUDA_ARCH_FLAG ?= native
|
|
||||||
else
|
|
||||||
CUDA_ARCH_FLAG ?= all
|
|
||||||
endif
|
|
||||||
|
|
||||||
CFLAGS += -DGGML_USE_CUDA -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include
|
|
||||||
CXXFLAGS += -DGGML_USE_CUDA -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include
|
|
||||||
LDFLAGS += -lcuda -lcublas -lculibos -lcudart -lcublasLt -lcufft -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/$(UNAME_M)-linux/lib -L/usr/lib/wsl/lib
|
|
||||||
WHISPER_OBJ += ggml-cuda.o whisper-mel-cuda.o
|
|
||||||
WHISPER_OBJ += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/*.cu))
|
|
||||||
NVCC = nvcc
|
|
||||||
NVCCFLAGS = --forward-unknown-to-host-compiler -arch=$(CUDA_ARCH_FLAG)
|
|
||||||
|
|
||||||
ggml-cuda/%.o: ggml-cuda/%.cu ggml-cuda/%.cuh ggml.h ggml-common.h ggml-cuda/common.cuh
|
|
||||||
$(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -c $< -o $@
|
|
||||||
|
|
||||||
ggml-cuda.o: ggml-cuda.cu ggml-cuda.h ggml.h ggml-backend.h ggml-backend-impl.h ggml-common.h $(wildcard ggml-cuda/*.cuh)
|
|
||||||
$(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -Wno-pedantic -c $< -o $@
|
|
||||||
|
|
||||||
whisper-mel-cuda.o: whisper-mel-cuda.cu whisper.h ggml.h ggml-backend.h whisper-mel.hpp whisper-mel-cuda.hpp
|
|
||||||
$(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -Wno-pedantic -c $< -o $@
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifdef WHISPER_HIPBLAS
|
|
||||||
ROCM_PATH ?= /opt/rocm
|
|
||||||
HIPCC ?= $(ROCM_PATH)/bin/hipcc
|
|
||||||
GPU_TARGETS ?= $(shell $(ROCM_PATH)/llvm/bin/amdgpu-arch)
|
|
||||||
CFLAGS += -DGGML_USE_HIPBLAS -DGGML_USE_CUDA
|
|
||||||
CXXFLAGS += -DGGML_USE_HIPBLAS -DGGML_USE_CUDA
|
|
||||||
LDFLAGS += -L$(ROCM_PATH)/lib -Wl,-rpath=$(ROCM_PATH)/lib
|
|
||||||
LDFLAGS += -lhipblas -lamdhip64 -lrocblas
|
|
||||||
HIPFLAGS += $(addprefix --offload-arch=,$(GPU_TARGETS))
|
|
||||||
WHISPER_OBJ += ggml-cuda.o
|
|
||||||
WHISPER_OBJ += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/*.cu))
|
|
||||||
|
|
||||||
ggml-cuda/%.o: ggml-cuda/%.cu ggml-cuda/%.cuh ggml.h ggml-common.h ggml-cuda/common.cuh
|
|
||||||
$(HIPCC) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $<
|
|
||||||
|
|
||||||
ggml-cuda.o: ggml-cuda.cu ggml-cuda.h ggml.h ggml-backend.h ggml-backend-impl.h ggml-common.h $(wildcard ggml-cuda/*.cuh)
|
|
||||||
$(HIPCC) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $<
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifdef WHISPER_CLBLAST
|
|
||||||
CFLAGS += -DGGML_USE_CLBLAST
|
|
||||||
CXXFLAGS += -DGGML_USE_CLBLAST
|
|
||||||
LDFLAGS += -lclblast
|
|
||||||
ifeq ($(UNAME_S),Darwin)
|
|
||||||
LDFLAGS += -framework OpenCL
|
|
||||||
else
|
|
||||||
LDFLAGS += -lOpenCL
|
|
||||||
endif
|
|
||||||
WHISPER_OBJ += ggml-opencl.o
|
|
||||||
|
|
||||||
ggml-opencl.o: ggml-opencl.cpp ggml-opencl.h
|
|
||||||
$(CXX) $(CXXFLAGS) -c $< -o $@
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifdef WHISPER_GPROF
|
|
||||||
CFLAGS += -pg
|
|
||||||
CXXFLAGS += -pg
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifneq ($(filter aarch64%,$(UNAME_M)),)
|
|
||||||
CFLAGS += -mcpu=native
|
|
||||||
CXXFLAGS += -mcpu=native
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifneq ($(filter armv6%,$(UNAME_M)),)
|
|
||||||
# 32-bit Raspberry Pi 1, 2, 3
|
|
||||||
CFLAGS += -mfpu=neon -mfp16-format=ieee -mno-unaligned-access
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifneq ($(filter armv7%,$(UNAME_M)),)
|
|
||||||
# 32-bit ARM, for example on Armbian or possibly raspbian
|
|
||||||
#CFLAGS += -mfpu=neon -mfp16-format=ieee -funsafe-math-optimizations -mno-unaligned-access
|
|
||||||
#CXXFLAGS += -mfpu=neon -mfp16-format=ieee -funsafe-math-optimizations -mno-unaligned-access
|
|
||||||
|
|
||||||
# 64-bit ARM on 32-bit OS, use these (TODO: auto-detect 64-bit)
|
|
||||||
CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -funsafe-math-optimizations -mno-unaligned-access
|
|
||||||
CXXFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -funsafe-math-optimizations -mno-unaligned-access
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifneq ($(filter armv8%,$(UNAME_M)),)
|
|
||||||
# Raspberry Pi 4
|
|
||||||
CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -funsafe-math-optimizations -mno-unaligned-access
|
|
||||||
CXXFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -funsafe-math-optimizations -mno-unaligned-access
|
|
||||||
endif
|
|
||||||
|
|
||||||
#
|
|
||||||
# Print build information
|
|
||||||
#
|
|
||||||
|
|
||||||
$(info I whisper.cpp build info: )
|
|
||||||
$(info I UNAME_S: $(UNAME_S))
|
|
||||||
$(info I UNAME_P: $(UNAME_P))
|
|
||||||
$(info I UNAME_M: $(UNAME_M))
|
|
||||||
$(info I CFLAGS: $(CFLAGS))
|
|
||||||
$(info I CXXFLAGS: $(CXXFLAGS))
|
|
||||||
$(info I LDFLAGS: $(LDFLAGS))
|
|
||||||
$(info I CC: $(CCV))
|
|
||||||
$(info I CXX: $(CXXV))
|
|
||||||
$(info )
|
|
||||||
|
|
||||||
ifdef WHISPER_CUBLAS
|
|
||||||
$(info !!!!)
|
|
||||||
$(info WHISPER_CUBLAS is deprecated and will be removed in the future. Use WHISPER_CUDA instead.)
|
|
||||||
$(info !!!!)
|
|
||||||
$(info )
|
|
||||||
endif
|
|
||||||
|
|
||||||
#
|
|
||||||
# Build library
|
|
||||||
#
|
|
||||||
|
|
||||||
ggml.o: ggml.c ggml.h ggml-cuda.h
|
|
||||||
$(CC) $(CFLAGS) -c $< -o $@
|
|
||||||
|
|
||||||
ggml-alloc.o: ggml-alloc.c ggml.h ggml-alloc.h
|
|
||||||
$(CC) $(CFLAGS) -c $< -o $@
|
|
||||||
|
|
||||||
ggml-backend.o: ggml-backend.c ggml.h ggml-backend.h
|
|
||||||
$(CC) $(CFLAGS) -c $< -o $@
|
|
||||||
|
|
||||||
ggml-quants.o: ggml-quants.c ggml.h ggml-quants.h
|
|
||||||
$(CC) $(CFLAGS) -c $< -o $@
|
|
||||||
|
|
||||||
WHISPER_OBJ += ggml.o ggml-alloc.o ggml-backend.o ggml-quants.o
|
|
||||||
|
|
||||||
whisper.o: whisper.cpp whisper.h whisper-mel.hpp ggml.h ggml-cuda.h
|
|
||||||
$(CXX) $(CXXFLAGS) -c $< -o $@
|
|
||||||
|
|
||||||
ifndef WHISPER_COREML
|
|
||||||
WHISPER_OBJ += whisper.o
|
|
||||||
else
|
|
||||||
whisper-encoder.o: coreml/whisper-encoder.mm coreml/whisper-encoder.h
|
|
||||||
$(CXX) -O3 -I . -fobjc-arc -c coreml/whisper-encoder.mm -o whisper-encoder.o
|
|
||||||
|
|
||||||
whisper-encoder-impl.o: coreml/whisper-encoder-impl.m coreml/whisper-encoder-impl.h
|
|
||||||
$(CXX) -O3 -I . -fobjc-arc -c coreml/whisper-encoder-impl.m -o whisper-encoder-impl.o
|
|
||||||
|
|
||||||
WHISPER_OBJ += whisper.o whisper-encoder.o whisper-encoder-impl.o
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifdef WHISPER_METAL
|
|
||||||
ggml-metal.o: ggml-metal.m ggml-metal.h
|
|
||||||
$(CC) $(CFLAGS) -c $< -o $@
|
|
||||||
|
|
||||||
WHISPER_OBJ += ggml-metal.o
|
|
||||||
|
|
||||||
ifdef WHISPER_METAL_EMBED_LIBRARY
|
|
||||||
CFLAGS += -DGGML_METAL_EMBED_LIBRARY
|
|
||||||
|
|
||||||
ggml-metal-embed.o: ggml-metal.metal ggml-common.h
|
|
||||||
@echo "Embedding Metal library"
|
|
||||||
$(eval TEMP_ASSEMBLY=$(shell mktemp))
|
|
||||||
$(eval TEMP_METALLIB=$(shell mktemp))
|
|
||||||
@sed "/^#include \"ggml-common.h\"/{r ggml-common.h"$$'\n'"d;}" ggml-metal.metal > $(TEMP_METALLIB)
|
|
||||||
@echo ".section __DATA, __ggml_metallib" > $(TEMP_ASSEMBLY)
|
|
||||||
@echo ".globl _ggml_metallib_start" >> $(TEMP_ASSEMBLY)
|
|
||||||
@echo "_ggml_metallib_start:" >> $(TEMP_ASSEMBLY)
|
|
||||||
@echo ".incbin \"$(TEMP_METALLIB)\"" >> $(TEMP_ASSEMBLY)
|
|
||||||
@echo ".globl _ggml_metallib_end" >> $(TEMP_ASSEMBLY)
|
|
||||||
@echo "_ggml_metallib_end:" >> $(TEMP_ASSEMBLY)
|
|
||||||
@$(AS) $(TEMP_ASSEMBLY) -o $@
|
|
||||||
@rm -f $(TEMP_ASSEMBLY) $(TEMP_METALLIB)
|
|
||||||
|
|
||||||
WHISPER_OBJ += ggml-metal-embed.o
|
|
||||||
endif
|
|
||||||
endif
|
|
||||||
|
|
||||||
libwhisper.a: $(WHISPER_OBJ)
|
|
||||||
$(AR) rcs libwhisper.a $(WHISPER_OBJ)
|
|
||||||
|
|
||||||
libwhisper.so: $(WHISPER_OBJ)
|
|
||||||
$(CXX) $(CXXFLAGS) -shared -o libwhisper.so $(WHISPER_OBJ) $(LDFLAGS)
|
|
||||||
|
|
||||||
clean:
|
|
||||||
rm -f *.o main stream command talk talk-llama bench quantize server lsp libwhisper.a libwhisper.so
|
|
||||||
|
|
||||||
#
|
|
||||||
# Examples
|
|
||||||
#
|
|
||||||
|
|
||||||
CC_SDL=`sdl2-config --cflags --libs`
|
|
||||||
|
|
||||||
SRC_COMMON = examples/common.cpp examples/common-ggml.cpp examples/grammar-parser.cpp
|
|
||||||
SRC_COMMON_SDL = examples/common-sdl.cpp
|
|
||||||
|
|
||||||
main: examples/main/main.cpp $(SRC_COMMON) $(WHISPER_OBJ)
|
|
||||||
$(CXX) $(CXXFLAGS) examples/main/main.cpp $(SRC_COMMON) $(WHISPER_OBJ) -o main $(LDFLAGS)
|
|
||||||
./main -h
|
|
||||||
|
|
||||||
bench: examples/bench/bench.cpp $(WHISPER_OBJ)
|
|
||||||
$(CXX) $(CXXFLAGS) examples/bench/bench.cpp $(WHISPER_OBJ) -o bench $(LDFLAGS)
|
|
||||||
|
|
||||||
quantize: examples/quantize/quantize.cpp $(WHISPER_OBJ) $(SRC_COMMON)
|
|
||||||
$(CXX) $(CXXFLAGS) examples/quantize/quantize.cpp $(SRC_COMMON) $(WHISPER_OBJ) -o quantize $(LDFLAGS)
|
|
||||||
|
|
||||||
server: examples/server/server.cpp $(SRC_COMMON) $(WHISPER_OBJ)
|
|
||||||
$(CXX) $(CXXFLAGS) examples/server/server.cpp $(SRC_COMMON) $(WHISPER_OBJ) -o server $(LDFLAGS) $(LWINSOCK2)
|
|
||||||
|
|
||||||
stream: examples/stream/stream.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ)
|
|
||||||
$(CXX) $(CXXFLAGS) examples/stream/stream.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) -o stream $(CC_SDL) $(LDFLAGS)
|
|
||||||
|
|
||||||
command: examples/command/command.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ)
|
|
||||||
$(CXX) $(CXXFLAGS) examples/command/command.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) -o command $(CC_SDL) $(LDFLAGS)
|
|
||||||
|
|
||||||
lsp: examples/lsp/lsp.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ)
|
|
||||||
$(CXX) $(CXXFLAGS) examples/lsp/lsp.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) -o lsp $(CC_SDL) $(LDFLAGS)
|
|
||||||
|
|
||||||
talk: examples/talk/talk.cpp examples/talk/gpt-2.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ)
|
|
||||||
$(CXX) $(CXXFLAGS) examples/talk/talk.cpp examples/talk/gpt-2.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) -o talk $(CC_SDL) $(LDFLAGS)
|
|
||||||
|
|
||||||
talk-llama: examples/talk-llama/talk-llama.cpp examples/talk-llama/llama.cpp examples/talk-llama/unicode.cpp examples/talk-llama/unicode-data.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ)
|
|
||||||
$(CXX) $(CXXFLAGS) examples/talk-llama/talk-llama.cpp examples/talk-llama/llama.cpp examples/talk-llama/unicode.cpp examples/talk-llama/unicode-data.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) $(WHISPER_OBJ) -o talk-llama $(CC_SDL) $(LDFLAGS)
|
|
||||||
|
|
||||||
#
|
#
|
||||||
# Audio samples
|
# Audio samples
|
||||||
#
|
#
|
||||||
|
|
||||||
|
.PHONY: build
|
||||||
|
build:
|
||||||
|
cmake -B build
|
||||||
|
cmake --build build --config Release
|
||||||
|
|
||||||
# download a few audio samples into folder "./samples":
|
# download a few audio samples into folder "./samples":
|
||||||
.PHONY: samples
|
.PHONY: samples
|
||||||
samples:
|
samples:
|
||||||
@ -540,9 +48,12 @@ samples:
|
|||||||
.PHONY: large-v1
|
.PHONY: large-v1
|
||||||
.PHONY: large-v2
|
.PHONY: large-v2
|
||||||
.PHONY: large-v3
|
.PHONY: large-v3
|
||||||
|
.PHONY: large-v3-turbo
|
||||||
|
|
||||||
tiny.en tiny base.en base small.en small medium.en medium large-v1 large-v2 large-v3: main
|
tiny.en tiny base.en base small.en small medium.en medium large-v1 large-v2 large-v3 large-v3-turbo:
|
||||||
bash ./models/download-ggml-model.sh $@
|
bash ./models/download-ggml-model.sh $@
|
||||||
|
cmake -B build
|
||||||
|
cmake --build build --config Release
|
||||||
@echo ""
|
@echo ""
|
||||||
@echo "==============================================="
|
@echo "==============================================="
|
||||||
@echo "Running $@ on all samples in ./samples ..."
|
@echo "Running $@ on all samples in ./samples ..."
|
||||||
@ -553,14 +64,6 @@ tiny.en tiny base.en base small.en small medium.en medium large-v1 large-v2 larg
|
|||||||
echo "[+] Running $@ on $$f ... (run 'ffplay $$f' to listen)" ; \
|
echo "[+] Running $@ on $$f ... (run 'ffplay $$f' to listen)" ; \
|
||||||
echo "----------------------------------------------" ; \
|
echo "----------------------------------------------" ; \
|
||||||
echo "" ; \
|
echo "" ; \
|
||||||
./main -m models/ggml-$@.bin -f $$f ; \
|
./build/bin/whisper-cli -m models/ggml-$@.bin -f $$f ; \
|
||||||
echo "" ; \
|
echo "" ; \
|
||||||
done
|
done
|
||||||
|
|
||||||
#
|
|
||||||
# Tests
|
|
||||||
#
|
|
||||||
|
|
||||||
.PHONY: tests
|
|
||||||
tests:
|
|
||||||
bash ./tests/run-tests.sh $(word 2, $(MAKECMDGOALS))
|
|
||||||
|
@ -14,48 +14,6 @@ let package = Package(
|
|||||||
.library(name: "whisper", targets: ["whisper"]),
|
.library(name: "whisper", targets: ["whisper"]),
|
||||||
],
|
],
|
||||||
targets: [
|
targets: [
|
||||||
.target(
|
.systemLibrary(name: "whisper", pkgConfig: "whisper"),
|
||||||
name: "whisper",
|
]
|
||||||
path: ".",
|
|
||||||
exclude: [
|
|
||||||
"bindings",
|
|
||||||
"cmake",
|
|
||||||
"coreml",
|
|
||||||
"examples",
|
|
||||||
"extra",
|
|
||||||
"models",
|
|
||||||
"samples",
|
|
||||||
"tests",
|
|
||||||
"CMakeLists.txt",
|
|
||||||
"ggml-cuda.cu",
|
|
||||||
"ggml-cuda.h",
|
|
||||||
"Makefile"
|
|
||||||
],
|
|
||||||
sources: [
|
|
||||||
"ggml.c",
|
|
||||||
"whisper.cpp",
|
|
||||||
"ggml-alloc.c",
|
|
||||||
"ggml-backend.c",
|
|
||||||
"ggml-quants.c",
|
|
||||||
"ggml-metal.m"
|
|
||||||
],
|
|
||||||
resources: [.process("ggml-metal.metal")],
|
|
||||||
publicHeadersPath: "spm-headers",
|
|
||||||
cSettings: [
|
|
||||||
.unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]),
|
|
||||||
.define("GGML_USE_ACCELERATE"),
|
|
||||||
.unsafeFlags(["-fno-objc-arc"]),
|
|
||||||
.define("GGML_USE_METAL")
|
|
||||||
// NOTE: NEW_LAPACK will required iOS version 16.4+
|
|
||||||
// We should consider add this in the future when we drop support for iOS 14
|
|
||||||
// (ref: ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc)
|
|
||||||
// .define("ACCELERATE_NEW_LAPACK"),
|
|
||||||
// .define("ACCELERATE_LAPACK_ILP64")
|
|
||||||
],
|
|
||||||
linkerSettings: [
|
|
||||||
.linkedFramework("Accelerate")
|
|
||||||
]
|
|
||||||
)
|
|
||||||
],
|
|
||||||
cxxLanguageStandard: .cxx11
|
|
||||||
)
|
)
|
||||||
|
425
README.md
425
README.md
@ -7,22 +7,23 @@
|
|||||||
[](https://conan.io/center/whisper-cpp)
|
[](https://conan.io/center/whisper-cpp)
|
||||||
[](https://www.npmjs.com/package/whisper.cpp/)
|
[](https://www.npmjs.com/package/whisper.cpp/)
|
||||||
|
|
||||||
Stable: [v1.6.2](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.6.0) / [Roadmap | F.A.Q.](https://github.com/ggerganov/whisper.cpp/discussions/126)
|
Stable: [v1.7.4](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.7.4) / [Roadmap | F.A.Q.](https://github.com/ggerganov/whisper.cpp/discussions/126)
|
||||||
|
|
||||||
High-performance inference of [OpenAI's Whisper](https://github.com/openai/whisper) automatic speech recognition (ASR) model:
|
High-performance inference of [OpenAI's Whisper](https://github.com/openai/whisper) automatic speech recognition (ASR) model:
|
||||||
|
|
||||||
- Plain C/C++ implementation without dependencies
|
- Plain C/C++ implementation without dependencies
|
||||||
- Apple Silicon first-class citizen - optimized via ARM NEON, Accelerate framework, Metal and [Core ML](https://github.com/ggerganov/whisper.cpp#core-ml-support)
|
- Apple Silicon first-class citizen - optimized via ARM NEON, Accelerate framework, Metal and [Core ML](#core-ml-support)
|
||||||
- AVX intrinsics support for x86 architectures
|
- AVX intrinsics support for x86 architectures
|
||||||
- VSX intrinsics support for POWER architectures
|
- VSX intrinsics support for POWER architectures
|
||||||
- Mixed F16 / F32 precision
|
- Mixed F16 / F32 precision
|
||||||
- [4-bit and 5-bit integer quantization support](https://github.com/ggerganov/whisper.cpp#quantization)
|
- [Integer quantization support](#quantization)
|
||||||
- Zero memory allocations at runtime
|
- Zero memory allocations at runtime
|
||||||
|
- [Vulkan support](#vulkan-gpu-support)
|
||||||
- Support for CPU-only inference
|
- Support for CPU-only inference
|
||||||
- [Efficient GPU support for NVIDIA](https://github.com/ggerganov/whisper.cpp#nvidia-gpu-support-via-cublas)
|
- [Efficient GPU support for NVIDIA](#nvidia-gpu-support)
|
||||||
- [Partial OpenCL GPU support via CLBlast](https://github.com/ggerganov/whisper.cpp#opencl-gpu-support-via-clblast)
|
- [OpenVINO Support](#openvino-support)
|
||||||
- [OpenVINO Support](https://github.com/ggerganov/whisper.cpp#openvino-support)
|
- [Ascend NPU Support](#ascend-npu-support)
|
||||||
- [C-style API](https://github.com/ggerganov/whisper.cpp/blob/master/whisper.h)
|
- [C-style API](https://github.com/ggerganov/whisper.cpp/blob/master/include/whisper.h)
|
||||||
|
|
||||||
Supported platforms:
|
Supported platforms:
|
||||||
|
|
||||||
@ -34,9 +35,9 @@ Supported platforms:
|
|||||||
- [x] [WebAssembly](examples/whisper.wasm)
|
- [x] [WebAssembly](examples/whisper.wasm)
|
||||||
- [x] Windows ([MSVC](https://github.com/ggerganov/whisper.cpp/blob/master/.github/workflows/build.yml#L117-L144) and [MinGW](https://github.com/ggerganov/whisper.cpp/issues/168)]
|
- [x] Windows ([MSVC](https://github.com/ggerganov/whisper.cpp/blob/master/.github/workflows/build.yml#L117-L144) and [MinGW](https://github.com/ggerganov/whisper.cpp/issues/168)]
|
||||||
- [x] [Raspberry Pi](https://github.com/ggerganov/whisper.cpp/discussions/166)
|
- [x] [Raspberry Pi](https://github.com/ggerganov/whisper.cpp/discussions/166)
|
||||||
- [x] [docker](https://github.com/ggerganov/whisper.cpp/pkgs/container/whisper.cpp)
|
- [x] [Docker](https://github.com/ggerganov/whisper.cpp/pkgs/container/whisper.cpp)
|
||||||
|
|
||||||
The entire high-level implementation of the model is contained in [whisper.h](whisper.h) and [whisper.cpp](whisper.cpp).
|
The entire high-level implementation of the model is contained in [whisper.h](include/whisper.h) and [whisper.cpp](src/whisper.cpp).
|
||||||
The rest of the code is part of the [`ggml`](https://github.com/ggerganov/ggml) machine learning library.
|
The rest of the code is part of the [`ggml`](https://github.com/ggerganov/ggml) machine learning library.
|
||||||
|
|
||||||
Having such a lightweight implementation of the model allows to easily integrate it in different platforms and applications.
|
Having such a lightweight implementation of the model allows to easily integrate it in different platforms and applications.
|
||||||
@ -52,18 +53,6 @@ On Apple Silicon, the inference runs fully on the GPU via Metal:
|
|||||||
|
|
||||||
https://github.com/ggerganov/whisper.cpp/assets/1991296/c82e8f86-60dc-49f2-b048-d2fdbd6b5225
|
https://github.com/ggerganov/whisper.cpp/assets/1991296/c82e8f86-60dc-49f2-b048-d2fdbd6b5225
|
||||||
|
|
||||||
Or you can even run it straight in the browser: [talk.wasm](examples/talk.wasm)
|
|
||||||
|
|
||||||
## Implementation details
|
|
||||||
|
|
||||||
- The core tensor operations are implemented in C ([ggml.h](ggml.h) / [ggml.c](ggml.c))
|
|
||||||
- The transformer model and the high-level C-style API are implemented in C++ ([whisper.h](whisper.h) / [whisper.cpp](whisper.cpp))
|
|
||||||
- Sample usage is demonstrated in [main.cpp](examples/main)
|
|
||||||
- Sample real-time audio transcription from the microphone is demonstrated in [stream.cpp](examples/stream)
|
|
||||||
- Various other examples are available in the [examples](examples) folder
|
|
||||||
|
|
||||||
The tensor operators are optimized heavily for Apple silicon CPUs. Depending on the computation size, Arm Neon SIMD intrinsics or CBLAS Accelerate framework routines are used. The latter are especially effective for bigger sizes since the Accelerate framework utilizes the special-purpose AMX coprocessor available in modern Apple products.
|
|
||||||
|
|
||||||
## Quick start
|
## Quick start
|
||||||
|
|
||||||
First clone the repository:
|
First clone the repository:
|
||||||
@ -72,140 +61,38 @@ First clone the repository:
|
|||||||
git clone https://github.com/ggerganov/whisper.cpp.git
|
git clone https://github.com/ggerganov/whisper.cpp.git
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Navigate into the directory:
|
||||||
|
|
||||||
|
```
|
||||||
|
cd whisper.cpp
|
||||||
|
```
|
||||||
|
|
||||||
Then, download one of the Whisper [models](models/README.md) converted in [`ggml` format](#ggml-format). For example:
|
Then, download one of the Whisper [models](models/README.md) converted in [`ggml` format](#ggml-format). For example:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
bash ./models/download-ggml-model.sh base.en
|
sh ./models/download-ggml-model.sh base.en
|
||||||
```
|
```
|
||||||
|
|
||||||
Now build the [main](examples/main) example and transcribe an audio file like this:
|
Now build the [whisper-cli](examples/cli) example and transcribe an audio file like this:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# build the main example
|
# build the project
|
||||||
make
|
cmake -B build
|
||||||
|
cmake --build build --config Release
|
||||||
|
|
||||||
# transcribe an audio file
|
# transcribe an audio file
|
||||||
./main -f samples/jfk.wav
|
./build/bin/whisper-cli -f samples/jfk.wav
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
For a quick demo, simply run `make base.en`:
|
For a quick demo, simply run `make base.en`.
|
||||||
|
|
||||||
```text
|
|
||||||
$ make base.en
|
|
||||||
|
|
||||||
cc -I. -O3 -std=c11 -pthread -DGGML_USE_ACCELERATE -c ggml.c -o ggml.o
|
|
||||||
c++ -I. -I./examples -O3 -std=c++11 -pthread -c whisper.cpp -o whisper.o
|
|
||||||
c++ -I. -I./examples -O3 -std=c++11 -pthread examples/main/main.cpp whisper.o ggml.o -o main -framework Accelerate
|
|
||||||
./main -h
|
|
||||||
|
|
||||||
usage: ./main [options] file0.wav file1.wav ...
|
|
||||||
|
|
||||||
options:
|
|
||||||
-h, --help [default] show this help message and exit
|
|
||||||
-t N, --threads N [4 ] number of threads to use during computation
|
|
||||||
-p N, --processors N [1 ] number of processors to use during computation
|
|
||||||
-ot N, --offset-t N [0 ] time offset in milliseconds
|
|
||||||
-on N, --offset-n N [0 ] segment index offset
|
|
||||||
-d N, --duration N [0 ] duration of audio to process in milliseconds
|
|
||||||
-mc N, --max-context N [-1 ] maximum number of text context tokens to store
|
|
||||||
-ml N, --max-len N [0 ] maximum segment length in characters
|
|
||||||
-sow, --split-on-word [false ] split on word rather than on token
|
|
||||||
-bo N, --best-of N [5 ] number of best candidates to keep
|
|
||||||
-bs N, --beam-size N [5 ] beam size for beam search
|
|
||||||
-wt N, --word-thold N [0.01 ] word timestamp probability threshold
|
|
||||||
-et N, --entropy-thold N [2.40 ] entropy threshold for decoder fail
|
|
||||||
-lpt N, --logprob-thold N [-1.00 ] log probability threshold for decoder fail
|
|
||||||
-debug, --debug-mode [false ] enable debug mode (eg. dump log_mel)
|
|
||||||
-tr, --translate [false ] translate from source language to english
|
|
||||||
-di, --diarize [false ] stereo audio diarization
|
|
||||||
-tdrz, --tinydiarize [false ] enable tinydiarize (requires a tdrz model)
|
|
||||||
-nf, --no-fallback [false ] do not use temperature fallback while decoding
|
|
||||||
-otxt, --output-txt [false ] output result in a text file
|
|
||||||
-ovtt, --output-vtt [false ] output result in a vtt file
|
|
||||||
-osrt, --output-srt [false ] output result in a srt file
|
|
||||||
-olrc, --output-lrc [false ] output result in a lrc file
|
|
||||||
-owts, --output-words [false ] output script for generating karaoke video
|
|
||||||
-fp, --font-path [/System/Library/Fonts/Supplemental/Courier New Bold.ttf] path to a monospace font for karaoke video
|
|
||||||
-ocsv, --output-csv [false ] output result in a CSV file
|
|
||||||
-oj, --output-json [false ] output result in a JSON file
|
|
||||||
-ojf, --output-json-full [false ] include more information in the JSON file
|
|
||||||
-of FNAME, --output-file FNAME [ ] output file path (without file extension)
|
|
||||||
-ps, --print-special [false ] print special tokens
|
|
||||||
-pc, --print-colors [false ] print colors
|
|
||||||
-pp, --print-progress [false ] print progress
|
|
||||||
-nt, --no-timestamps [false ] do not print timestamps
|
|
||||||
-l LANG, --language LANG [en ] spoken language ('auto' for auto-detect)
|
|
||||||
-dl, --detect-language [false ] exit after automatically detecting language
|
|
||||||
--prompt PROMPT [ ] initial prompt
|
|
||||||
-m FNAME, --model FNAME [models/ggml-base.en.bin] model path
|
|
||||||
-f FNAME, --file FNAME [ ] input WAV file path
|
|
||||||
-oved D, --ov-e-device DNAME [CPU ] the OpenVINO device used for encode inference
|
|
||||||
-ls, --log-score [false ] log best decoder scores of tokens
|
|
||||||
-ng, --no-gpu [false ] disable GPU
|
|
||||||
|
|
||||||
|
|
||||||
bash ./models/download-ggml-model.sh base.en
|
|
||||||
Downloading ggml model base.en ...
|
|
||||||
ggml-base.en.bin 100%[========================>] 141.11M 6.34MB/s in 24s
|
|
||||||
Done! Model 'base.en' saved in 'models/ggml-base.en.bin'
|
|
||||||
You can now use it like this:
|
|
||||||
|
|
||||||
$ ./main -m models/ggml-base.en.bin -f samples/jfk.wav
|
|
||||||
|
|
||||||
|
|
||||||
===============================================
|
|
||||||
Running base.en on all samples in ./samples ...
|
|
||||||
===============================================
|
|
||||||
|
|
||||||
----------------------------------------------
|
|
||||||
[+] Running base.en on samples/jfk.wav ... (run 'ffplay samples/jfk.wav' to listen)
|
|
||||||
----------------------------------------------
|
|
||||||
|
|
||||||
whisper_init_from_file: loading model from 'models/ggml-base.en.bin'
|
|
||||||
whisper_model_load: loading model
|
|
||||||
whisper_model_load: n_vocab = 51864
|
|
||||||
whisper_model_load: n_audio_ctx = 1500
|
|
||||||
whisper_model_load: n_audio_state = 512
|
|
||||||
whisper_model_load: n_audio_head = 8
|
|
||||||
whisper_model_load: n_audio_layer = 6
|
|
||||||
whisper_model_load: n_text_ctx = 448
|
|
||||||
whisper_model_load: n_text_state = 512
|
|
||||||
whisper_model_load: n_text_head = 8
|
|
||||||
whisper_model_load: n_text_layer = 6
|
|
||||||
whisper_model_load: n_mels = 80
|
|
||||||
whisper_model_load: f16 = 1
|
|
||||||
whisper_model_load: type = 2
|
|
||||||
whisper_model_load: mem required = 215.00 MB (+ 6.00 MB per decoder)
|
|
||||||
whisper_model_load: kv self size = 5.25 MB
|
|
||||||
whisper_model_load: kv cross size = 17.58 MB
|
|
||||||
whisper_model_load: adding 1607 extra tokens
|
|
||||||
whisper_model_load: model ctx = 140.60 MB
|
|
||||||
whisper_model_load: model size = 140.54 MB
|
|
||||||
|
|
||||||
system_info: n_threads = 4 / 10 | AVX = 0 | AVX2 = 0 | AVX512 = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 |
|
|
||||||
|
|
||||||
main: processing 'samples/jfk.wav' (176000 samples, 11.0 sec), 4 threads, 1 processors, lang = en, task = transcribe, timestamps = 1 ...
|
|
||||||
|
|
||||||
|
|
||||||
[00:00:00.000 --> 00:00:11.000] And so my fellow Americans, ask not what your country can do for you, ask what you can do for your country.
|
|
||||||
|
|
||||||
|
|
||||||
whisper_print_timings: fallbacks = 0 p / 0 h
|
|
||||||
whisper_print_timings: load time = 113.81 ms
|
|
||||||
whisper_print_timings: mel time = 15.40 ms
|
|
||||||
whisper_print_timings: sample time = 11.58 ms / 27 runs ( 0.43 ms per run)
|
|
||||||
whisper_print_timings: encode time = 266.60 ms / 1 runs ( 266.60 ms per run)
|
|
||||||
whisper_print_timings: decode time = 66.11 ms / 27 runs ( 2.45 ms per run)
|
|
||||||
whisper_print_timings: total time = 476.31 ms
|
|
||||||
```
|
|
||||||
|
|
||||||
The command downloads the `base.en` model converted to custom `ggml` format and runs the inference on all `.wav` samples in the folder `samples`.
|
The command downloads the `base.en` model converted to custom `ggml` format and runs the inference on all `.wav` samples in the folder `samples`.
|
||||||
|
|
||||||
For detailed usage instructions, run: `./main -h`
|
For detailed usage instructions, run: `./build/bin/whisper-cli -h`
|
||||||
|
|
||||||
Note that the [main](examples/main) example currently runs only with 16-bit WAV files, so make sure to convert your input before running the tool.
|
Note that the [whisper-cli](examples/cli) example currently runs only with 16-bit WAV files, so make sure to convert your input before running the tool.
|
||||||
For example, you can use `ffmpeg` like this:
|
For example, you can use `ffmpeg` like this:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@ -217,7 +104,7 @@ ffmpeg -i input.mp3 -ar 16000 -ac 1 -c:a pcm_s16le output.wav
|
|||||||
If you want some extra audio samples to play with, simply run:
|
If you want some extra audio samples to play with, simply run:
|
||||||
|
|
||||||
```
|
```
|
||||||
make samples
|
make -j samples
|
||||||
```
|
```
|
||||||
|
|
||||||
This will download a few more audio files from Wikipedia and convert them to 16-bit WAV format via `ffmpeg`.
|
This will download a few more audio files from Wikipedia and convert them to 16-bit WAV format via `ffmpeg`.
|
||||||
@ -225,17 +112,18 @@ This will download a few more audio files from Wikipedia and convert them to 16-
|
|||||||
You can download and run the other models as follows:
|
You can download and run the other models as follows:
|
||||||
|
|
||||||
```
|
```
|
||||||
make tiny.en
|
make -j tiny.en
|
||||||
make tiny
|
make -j tiny
|
||||||
make base.en
|
make -j base.en
|
||||||
make base
|
make -j base
|
||||||
make small.en
|
make -j small.en
|
||||||
make small
|
make -j small
|
||||||
make medium.en
|
make -j medium.en
|
||||||
make medium
|
make -j medium
|
||||||
make large-v1
|
make -j large-v1
|
||||||
make large-v2
|
make -j large-v2
|
||||||
make large-v3
|
make -j large-v3
|
||||||
|
make -j large-v3-turbo
|
||||||
```
|
```
|
||||||
|
|
||||||
## Memory usage
|
## Memory usage
|
||||||
@ -257,11 +145,12 @@ Here are the steps for creating and using a quantized model:
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
# quantize a model with Q5_0 method
|
# quantize a model with Q5_0 method
|
||||||
make quantize
|
cmake -B build
|
||||||
./quantize models/ggml-base.en.bin models/ggml-base.en-q5_0.bin q5_0
|
cmake --build build --config Release
|
||||||
|
./build/bin/quantize models/ggml-base.en.bin models/ggml-base.en-q5_0.bin q5_0
|
||||||
|
|
||||||
# run the examples as usual, specifying the quantized model file
|
# run the examples as usual, specifying the quantized model file
|
||||||
./main -m models/ggml-base.en-q5_0.bin ./samples/gb0.wav
|
./build/bin/whisper-cli -m models/ggml-base.en-q5_0.bin ./samples/gb0.wav
|
||||||
```
|
```
|
||||||
|
|
||||||
## Core ML support
|
## Core ML support
|
||||||
@ -295,10 +184,6 @@ speed-up - more than x3 faster compared with CPU-only execution. Here are the in
|
|||||||
- Build `whisper.cpp` with Core ML support:
|
- Build `whisper.cpp` with Core ML support:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# using Makefile
|
|
||||||
make clean
|
|
||||||
WHISPER_COREML=1 make -j
|
|
||||||
|
|
||||||
# using CMake
|
# using CMake
|
||||||
cmake -B build -DWHISPER_COREML=1
|
cmake -B build -DWHISPER_COREML=1
|
||||||
cmake --build build -j --config Release
|
cmake --build build -j --config Release
|
||||||
@ -307,7 +192,7 @@ speed-up - more than x3 faster compared with CPU-only execution. Here are the in
|
|||||||
- Run the examples as usual. For example:
|
- Run the examples as usual. For example:
|
||||||
|
|
||||||
```text
|
```text
|
||||||
$ ./main -m models/ggml-base.en.bin -f samples/jfk.wav
|
$ ./build/bin/whisper-cli -m models/ggml-base.en.bin -f samples/jfk.wav
|
||||||
|
|
||||||
...
|
...
|
||||||
|
|
||||||
@ -391,7 +276,7 @@ This can result in significant speedup in encoder performance. Here are the inst
|
|||||||
- Run the examples as usual. For example:
|
- Run the examples as usual. For example:
|
||||||
|
|
||||||
```text
|
```text
|
||||||
$ ./main -m models/ggml-base.en.bin -f samples/jfk.wav
|
$ ./build/bin/whisper-cli -m models/ggml-base.en.bin -f samples/jfk.wav
|
||||||
|
|
||||||
...
|
...
|
||||||
|
|
||||||
@ -408,7 +293,7 @@ This can result in significant speedup in encoder performance. Here are the inst
|
|||||||
The first time run on an OpenVINO device is slow, since the OpenVINO framework will compile the IR (Intermediate Representation) model to a device-specific 'blob'. This device-specific blob will get
|
The first time run on an OpenVINO device is slow, since the OpenVINO framework will compile the IR (Intermediate Representation) model to a device-specific 'blob'. This device-specific blob will get
|
||||||
cached for the next run.
|
cached for the next run.
|
||||||
|
|
||||||
For more information about the Core ML implementation please refer to PR [#1037](https://github.com/ggerganov/whisper.cpp/pull/1037).
|
For more information about the OpenVINO implementation please refer to PR [#1037](https://github.com/ggerganov/whisper.cpp/pull/1037).
|
||||||
|
|
||||||
## NVIDIA GPU support
|
## NVIDIA GPU support
|
||||||
|
|
||||||
@ -418,31 +303,19 @@ First, make sure you have installed `cuda`: https://developer.nvidia.com/cuda-do
|
|||||||
Now build `whisper.cpp` with CUDA support:
|
Now build `whisper.cpp` with CUDA support:
|
||||||
|
|
||||||
```
|
```
|
||||||
make clean
|
cmake -B build -DGGML_CUDA=1
|
||||||
WHISPER_CUDA=1 make -j
|
|
||||||
```
|
|
||||||
|
|
||||||
## OpenCL GPU support via CLBlast
|
|
||||||
|
|
||||||
For cards and integrated GPUs that support OpenCL, the Encoder processing can be largely offloaded to the GPU through CLBlast. This is especially useful for users with AMD APUs or low end devices for up to ~2x speedup.
|
|
||||||
|
|
||||||
First, make sure you have installed `CLBlast` for your OS or Distribution: https://github.com/CNugteren/CLBlast
|
|
||||||
|
|
||||||
Now build `whisper.cpp` with CLBlast support:
|
|
||||||
|
|
||||||
```
|
|
||||||
Makefile:
|
|
||||||
cd whisper.cpp
|
|
||||||
make clean
|
|
||||||
WHISPER_CLBLAST=1 make -j
|
|
||||||
|
|
||||||
CMake:
|
|
||||||
cd whisper.cpp
|
|
||||||
cmake -B build -DWHISPER_CLBLAST=ON
|
|
||||||
cmake --build build -j --config Release
|
cmake --build build -j --config Release
|
||||||
```
|
```
|
||||||
|
|
||||||
Run all the examples as usual.
|
## Vulkan GPU support
|
||||||
|
Cross-vendor solution which allows you to accelerate workload on your GPU.
|
||||||
|
First, make sure your graphics card driver provides support for Vulkan API.
|
||||||
|
|
||||||
|
Now build `whisper.cpp` with Vulkan support:
|
||||||
|
```
|
||||||
|
cmake -B build -DGGML_VULKAN=1
|
||||||
|
cmake --build build -j --config Release
|
||||||
|
```
|
||||||
|
|
||||||
## BLAS CPU support via OpenBLAS
|
## BLAS CPU support via OpenBLAS
|
||||||
|
|
||||||
@ -452,56 +325,40 @@ First, make sure you have installed `openblas`: https://www.openblas.net/
|
|||||||
Now build `whisper.cpp` with OpenBLAS support:
|
Now build `whisper.cpp` with OpenBLAS support:
|
||||||
|
|
||||||
```
|
```
|
||||||
make clean
|
cmake -B build -DGGML_BLAS=1
|
||||||
WHISPER_OPENBLAS=1 make -j
|
cmake --build build -j --config Release
|
||||||
```
|
```
|
||||||
|
|
||||||
## BLAS CPU support via Intel MKL
|
## Ascend NPU support
|
||||||
|
|
||||||
Encoder processing can be accelerated on the CPU via the BLAS compatible interface of Intel's Math Kernel Library.
|
Ascend NPU provides inference acceleration via [`CANN`](https://www.hiascend.com/en/software/cann) and AI cores.
|
||||||
First, make sure you have installed Intel's MKL runtime and development packages: https://www.intel.com/content/www/us/en/developer/tools/oneapi/onemkl-download.html
|
|
||||||
|
|
||||||
Now build `whisper.cpp` with Intel MKL BLAS support:
|
First, check if your Ascend NPU device is supported:
|
||||||
|
|
||||||
|
**Verified devices**
|
||||||
|
| Ascend NPU | Status |
|
||||||
|
|:-----------------------------:|:-------:|
|
||||||
|
| Atlas 300T A2 | Support |
|
||||||
|
|
||||||
|
Then, make sure you have installed [`CANN toolkit`](https://www.hiascend.com/en/software/cann/community). The latest version of CANN is recommended.
|
||||||
|
|
||||||
|
Now build `whisper.cpp` with CANN support:
|
||||||
|
|
||||||
```
|
```
|
||||||
source /opt/intel/oneapi/setvars.sh
|
cmake -B build -DGGML_CANN=1
|
||||||
mkdir build
|
cmake --build build -j --config Release
|
||||||
cd build
|
|
||||||
cmake -DWHISPER_MKL=ON ..
|
|
||||||
WHISPER_MKL=1 make -j
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Docker
|
Run the inference examples as usual, for example:
|
||||||
|
|
||||||
### Prerequisites
|
|
||||||
|
|
||||||
- Docker must be installed and running on your system.
|
|
||||||
- Create a folder to store big models & intermediate files (ex. /whisper/models)
|
|
||||||
|
|
||||||
### Images
|
|
||||||
|
|
||||||
We have two Docker images available for this project:
|
|
||||||
|
|
||||||
1. `ghcr.io/ggerganov/whisper.cpp:main`: This image includes the main executable file as well as `curl` and `ffmpeg`. (platforms: `linux/amd64`, `linux/arm64`)
|
|
||||||
2. `ghcr.io/ggerganov/whisper.cpp:main-cuda`: Same as `main` but compiled with CUDA support. (platforms: `linux/amd64`)
|
|
||||||
|
|
||||||
### Usage
|
|
||||||
|
|
||||||
```shell
|
|
||||||
# download model and persist it in a local folder
|
|
||||||
docker run -it --rm \
|
|
||||||
-v path/to/models:/models \
|
|
||||||
whisper.cpp:main "./models/download-ggml-model.sh base /models"
|
|
||||||
# transcribe an audio file
|
|
||||||
docker run -it --rm \
|
|
||||||
-v path/to/models:/models \
|
|
||||||
-v path/to/audios:/audios \
|
|
||||||
whisper.cpp:main "./main -m /models/ggml-base.bin -f /audios/jfk.wav"
|
|
||||||
# transcribe an audio file in samples folder
|
|
||||||
docker run -it --rm \
|
|
||||||
-v path/to/models:/models \
|
|
||||||
whisper.cpp:main "./main -m /models/ggml-base.bin -f ./samples/jfk.wav"
|
|
||||||
```
|
```
|
||||||
|
./build/bin/whisper-cli -f samples/jfk.wav -m models/ggml-base.en.bin -t 8
|
||||||
|
```
|
||||||
|
|
||||||
|
*Notes:*
|
||||||
|
|
||||||
|
- If you have trouble with Ascend NPU device, please create an issue with **[CANN]** prefix/tag.
|
||||||
|
- If you run successfully with your Ascend NPU device, please help update the table `Verified devices`.
|
||||||
|
|
||||||
## Installing with Conan
|
## Installing with Conan
|
||||||
|
|
||||||
@ -517,89 +374,6 @@ For detailed instructions on how to use Conan, please refer to the [Conan docume
|
|||||||
|
|
||||||
- Inference only
|
- Inference only
|
||||||
|
|
||||||
## Another example
|
|
||||||
|
|
||||||
Here is another example of transcribing a [3:24 min speech](https://upload.wikimedia.org/wikipedia/commons/1/1f/George_W_Bush_Columbia_FINAL.ogg)
|
|
||||||
in about half a minute on a MacBook M1 Pro, using `medium.en` model:
|
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary>Expand to see the result</summary>
|
|
||||||
|
|
||||||
```text
|
|
||||||
$ ./main -m models/ggml-medium.en.bin -f samples/gb1.wav -t 8
|
|
||||||
|
|
||||||
whisper_init_from_file: loading model from 'models/ggml-medium.en.bin'
|
|
||||||
whisper_model_load: loading model
|
|
||||||
whisper_model_load: n_vocab = 51864
|
|
||||||
whisper_model_load: n_audio_ctx = 1500
|
|
||||||
whisper_model_load: n_audio_state = 1024
|
|
||||||
whisper_model_load: n_audio_head = 16
|
|
||||||
whisper_model_load: n_audio_layer = 24
|
|
||||||
whisper_model_load: n_text_ctx = 448
|
|
||||||
whisper_model_load: n_text_state = 1024
|
|
||||||
whisper_model_load: n_text_head = 16
|
|
||||||
whisper_model_load: n_text_layer = 24
|
|
||||||
whisper_model_load: n_mels = 80
|
|
||||||
whisper_model_load: f16 = 1
|
|
||||||
whisper_model_load: type = 4
|
|
||||||
whisper_model_load: mem required = 1720.00 MB (+ 43.00 MB per decoder)
|
|
||||||
whisper_model_load: kv self size = 42.00 MB
|
|
||||||
whisper_model_load: kv cross size = 140.62 MB
|
|
||||||
whisper_model_load: adding 1607 extra tokens
|
|
||||||
whisper_model_load: model ctx = 1462.35 MB
|
|
||||||
whisper_model_load: model size = 1462.12 MB
|
|
||||||
|
|
||||||
system_info: n_threads = 8 / 10 | AVX = 0 | AVX2 = 0 | AVX512 = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 |
|
|
||||||
|
|
||||||
main: processing 'samples/gb1.wav' (3179750 samples, 198.7 sec), 8 threads, 1 processors, lang = en, task = transcribe, timestamps = 1 ...
|
|
||||||
|
|
||||||
|
|
||||||
[00:00:00.000 --> 00:00:08.000] My fellow Americans, this day has brought terrible news and great sadness to our country.
|
|
||||||
[00:00:08.000 --> 00:00:17.000] At nine o'clock this morning, Mission Control in Houston lost contact with our Space Shuttle Columbia.
|
|
||||||
[00:00:17.000 --> 00:00:23.000] A short time later, debris was seen falling from the skies above Texas.
|
|
||||||
[00:00:23.000 --> 00:00:29.000] The Columbia's lost. There are no survivors.
|
|
||||||
[00:00:29.000 --> 00:00:32.000] On board was a crew of seven.
|
|
||||||
[00:00:32.000 --> 00:00:39.000] Colonel Rick Husband, Lieutenant Colonel Michael Anderson, Commander Laurel Clark,
|
|
||||||
[00:00:39.000 --> 00:00:48.000] Captain David Brown, Commander William McCool, Dr. Kultna Shavla, and Ilan Ramon,
|
|
||||||
[00:00:48.000 --> 00:00:52.000] a colonel in the Israeli Air Force.
|
|
||||||
[00:00:52.000 --> 00:00:58.000] These men and women assumed great risk in the service to all humanity.
|
|
||||||
[00:00:58.000 --> 00:01:03.000] In an age when space flight has come to seem almost routine,
|
|
||||||
[00:01:03.000 --> 00:01:07.000] it is easy to overlook the dangers of travel by rocket
|
|
||||||
[00:01:07.000 --> 00:01:12.000] and the difficulties of navigating the fierce outer atmosphere of the Earth.
|
|
||||||
[00:01:12.000 --> 00:01:18.000] These astronauts knew the dangers, and they faced them willingly,
|
|
||||||
[00:01:18.000 --> 00:01:23.000] knowing they had a high and noble purpose in life.
|
|
||||||
[00:01:23.000 --> 00:01:31.000] Because of their courage and daring and idealism, we will miss them all the more.
|
|
||||||
[00:01:31.000 --> 00:01:36.000] All Americans today are thinking as well of the families of these men and women
|
|
||||||
[00:01:36.000 --> 00:01:40.000] who have been given this sudden shock and grief.
|
|
||||||
[00:01:40.000 --> 00:01:45.000] You're not alone. Our entire nation grieves with you,
|
|
||||||
[00:01:45.000 --> 00:01:52.000] and those you love will always have the respect and gratitude of this country.
|
|
||||||
[00:01:52.000 --> 00:01:56.000] The cause in which they died will continue.
|
|
||||||
[00:01:56.000 --> 00:02:04.000] Mankind is led into the darkness beyond our world by the inspiration of discovery
|
|
||||||
[00:02:04.000 --> 00:02:11.000] and the longing to understand. Our journey into space will go on.
|
|
||||||
[00:02:11.000 --> 00:02:16.000] In the skies today, we saw destruction and tragedy.
|
|
||||||
[00:02:16.000 --> 00:02:22.000] Yet farther than we can see, there is comfort and hope.
|
|
||||||
[00:02:22.000 --> 00:02:29.000] In the words of the prophet Isaiah, "Lift your eyes and look to the heavens
|
|
||||||
[00:02:29.000 --> 00:02:35.000] who created all these. He who brings out the starry hosts one by one
|
|
||||||
[00:02:35.000 --> 00:02:39.000] and calls them each by name."
|
|
||||||
[00:02:39.000 --> 00:02:46.000] Because of His great power and mighty strength, not one of them is missing.
|
|
||||||
[00:02:46.000 --> 00:02:55.000] The same Creator who names the stars also knows the names of the seven souls we mourn today.
|
|
||||||
[00:02:55.000 --> 00:03:01.000] The crew of the shuttle Columbia did not return safely to earth,
|
|
||||||
[00:03:01.000 --> 00:03:05.000] yet we can pray that all are safely home.
|
|
||||||
[00:03:05.000 --> 00:03:13.000] May God bless the grieving families, and may God continue to bless America.
|
|
||||||
[00:03:13.000 --> 00:03:19.000] [Silence]
|
|
||||||
|
|
||||||
|
|
||||||
whisper_print_timings: fallbacks = 1 p / 0 h
|
|
||||||
whisper_print_timings: load time = 569.03 ms
|
|
||||||
whisper_print_timings: mel time = 146.85 ms
|
|
||||||
whisper_print_timings: sample time = 238.66 ms / 553 runs ( 0.43 ms per run)
|
|
||||||
whisper_print_timings: encode time = 18665.10 ms / 9 runs ( 2073.90 ms per run)
|
|
||||||
whisper_print_timings: decode time = 13090.93 ms / 549 runs ( 23.85 ms per run)
|
|
||||||
whisper_print_timings: total time = 32733.52 ms
|
|
||||||
```
|
|
||||||
|
|
||||||
</details>
|
|
||||||
|
|
||||||
## Real-time audio input example
|
## Real-time audio input example
|
||||||
|
|
||||||
This is a naive example of performing real-time inference on audio from your microphone.
|
This is a naive example of performing real-time inference on audio from your microphone.
|
||||||
@ -607,8 +381,9 @@ The [stream](examples/stream) tool samples the audio every half a second and run
|
|||||||
More info is available in [issue #10](https://github.com/ggerganov/whisper.cpp/issues/10).
|
More info is available in [issue #10](https://github.com/ggerganov/whisper.cpp/issues/10).
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
make stream
|
cmake -B build -DWHISPER_SDL2=ON
|
||||||
./stream -m ./models/ggml-base.en.bin -t 8 --step 500 --length 5000
|
cmake --build build --config Release
|
||||||
|
./build/bin/whisper-stream -m ./models/ggml-base.en.bin -t 8 --step 500 --length 5000
|
||||||
```
|
```
|
||||||
|
|
||||||
https://user-images.githubusercontent.com/1991296/194935793-76afede7-cfa8-48d8-a80f-28ba83be7d09.mp4
|
https://user-images.githubusercontent.com/1991296/194935793-76afede7-cfa8-48d8-a80f-28ba83be7d09.mp4
|
||||||
@ -619,7 +394,7 @@ Adding the `--print-colors` argument will print the transcribed text using an ex
|
|||||||
to highlight words with high or low confidence:
|
to highlight words with high or low confidence:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./main -m models/ggml-base.en.bin -f samples/gb0.wav --print-colors
|
./build/bin/whisper-cli -m models/ggml-base.en.bin -f samples/gb0.wav --print-colors
|
||||||
```
|
```
|
||||||
|
|
||||||
<img width="965" alt="image" src="https://user-images.githubusercontent.com/1991296/197356445-311c8643-9397-4e5e-b46e-0b4b4daa2530.png">
|
<img width="965" alt="image" src="https://user-images.githubusercontent.com/1991296/197356445-311c8643-9397-4e5e-b46e-0b4b4daa2530.png">
|
||||||
@ -629,7 +404,7 @@ to highlight words with high or low confidence:
|
|||||||
For example, to limit the line length to a maximum of 16 characters, simply add `-ml 16`:
|
For example, to limit the line length to a maximum of 16 characters, simply add `-ml 16`:
|
||||||
|
|
||||||
```text
|
```text
|
||||||
$ ./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 16
|
$ ./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 16
|
||||||
|
|
||||||
whisper_model_load: loading model from './models/ggml-base.en.bin'
|
whisper_model_load: loading model from './models/ggml-base.en.bin'
|
||||||
...
|
...
|
||||||
@ -653,7 +428,7 @@ main: processing './samples/jfk.wav' (176000 samples, 11.0 sec), 4 threads, 1 pr
|
|||||||
The `--max-len` argument can be used to obtain word-level timestamps. Simply use `-ml 1`:
|
The `--max-len` argument can be used to obtain word-level timestamps. Simply use `-ml 1`:
|
||||||
|
|
||||||
```text
|
```text
|
||||||
$ ./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 1
|
$ ./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 1
|
||||||
|
|
||||||
whisper_model_load: loading model from './models/ggml-base.en.bin'
|
whisper_model_load: loading model from './models/ggml-base.en.bin'
|
||||||
...
|
...
|
||||||
@ -700,7 +475,7 @@ Sample usage:
|
|||||||
./models/download-ggml-model.sh small.en-tdrz
|
./models/download-ggml-model.sh small.en-tdrz
|
||||||
|
|
||||||
# run as usual, adding the "-tdrz" command-line argument
|
# run as usual, adding the "-tdrz" command-line argument
|
||||||
./main -f ./samples/a13.wav -m ./models/ggml-small.en-tdrz.bin -tdrz
|
./build/bin/whisper-cli -f ./samples/a13.wav -m ./models/ggml-small.en-tdrz.bin -tdrz
|
||||||
...
|
...
|
||||||
main: processing './samples/a13.wav' (480000 samples, 30.0 sec), 4 threads, 1 processors, lang = en, task = transcribe, tdrz = 1, timestamps = 1 ...
|
main: processing './samples/a13.wav' (480000 samples, 30.0 sec), 4 threads, 1 processors, lang = en, task = transcribe, tdrz = 1, timestamps = 1 ...
|
||||||
...
|
...
|
||||||
@ -717,14 +492,14 @@ main: processing './samples/a13.wav' (480000 samples, 30.0 sec), 4 threads, 1 pr
|
|||||||
|
|
||||||
## Karaoke-style movie generation (experimental)
|
## Karaoke-style movie generation (experimental)
|
||||||
|
|
||||||
The [main](examples/main) example provides support for output of karaoke-style movies, where the
|
The [whisper-cli](examples/cli) example provides support for output of karaoke-style movies, where the
|
||||||
currently pronounced word is highlighted. Use the `-wts` argument and run the generated bash script.
|
currently pronounced word is highlighted. Use the `-wts` argument and run the generated bash script.
|
||||||
This requires to have `ffmpeg` installed.
|
This requires to have `ffmpeg` installed.
|
||||||
|
|
||||||
Here are a few _"typical"_ examples:
|
Here are a few _"typical"_ examples:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -owts
|
./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -owts
|
||||||
source ./samples/jfk.wav.wts
|
source ./samples/jfk.wav.wts
|
||||||
ffplay ./samples/jfk.wav.mp4
|
ffplay ./samples/jfk.wav.mp4
|
||||||
```
|
```
|
||||||
@ -734,7 +509,7 @@ https://user-images.githubusercontent.com/1991296/199337465-dbee4b5e-9aeb-48a3-b
|
|||||||
---
|
---
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./main -m ./models/ggml-base.en.bin -f ./samples/mm0.wav -owts
|
./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/mm0.wav -owts
|
||||||
source ./samples/mm0.wav.wts
|
source ./samples/mm0.wav.wts
|
||||||
ffplay ./samples/mm0.wav.mp4
|
ffplay ./samples/mm0.wav.mp4
|
||||||
```
|
```
|
||||||
@ -744,7 +519,7 @@ https://user-images.githubusercontent.com/1991296/199337504-cc8fd233-0cb7-4920-9
|
|||||||
---
|
---
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./main -m ./models/ggml-base.en.bin -f ./samples/gb0.wav -owts
|
./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/gb0.wav -owts
|
||||||
source ./samples/gb0.wav.wts
|
source ./samples/gb0.wav.wts
|
||||||
ffplay ./samples/gb0.wav.mp4
|
ffplay ./samples/gb0.wav.mp4
|
||||||
```
|
```
|
||||||
@ -769,12 +544,12 @@ https://user-images.githubusercontent.com/1991296/223206245-2d36d903-cf8e-4f09-8
|
|||||||
## Benchmarks
|
## Benchmarks
|
||||||
|
|
||||||
In order to have an objective comparison of the performance of the inference across different system configurations,
|
In order to have an objective comparison of the performance of the inference across different system configurations,
|
||||||
use the [bench](examples/bench) tool. The tool simply runs the Encoder part of the model and prints how much time it
|
use the [whisper-bench](examples/bench) tool. The tool simply runs the Encoder part of the model and prints how much time it
|
||||||
took to execute it. The results are summarized in the following Github issue:
|
took to execute it. The results are summarized in the following Github issue:
|
||||||
|
|
||||||
[Benchmark results](https://github.com/ggerganov/whisper.cpp/issues/89)
|
[Benchmark results](https://github.com/ggerganov/whisper.cpp/issues/89)
|
||||||
|
|
||||||
Additionally a script to run whisper.cpp with different models and audio files is provided [bench.py](bench.py).
|
Additionally a script to run whisper.cpp with different models and audio files is provided [bench.py](scripts/bench.py).
|
||||||
|
|
||||||
You can run it with the following command, by default it will run against any standard model in the models folder.
|
You can run it with the following command, by default it will run against any standard model in the models folder.
|
||||||
|
|
||||||
@ -821,6 +596,7 @@ For more details, see the conversion script [models/convert-pt-to-ggml.py](model
|
|||||||
- [stlukey/whispercpp.py](https://github.com/stlukey/whispercpp.py) (Cython)
|
- [stlukey/whispercpp.py](https://github.com/stlukey/whispercpp.py) (Cython)
|
||||||
- [AIWintermuteAI/whispercpp](https://github.com/AIWintermuteAI/whispercpp) (Updated fork of aarnphm/whispercpp)
|
- [AIWintermuteAI/whispercpp](https://github.com/AIWintermuteAI/whispercpp) (Updated fork of aarnphm/whispercpp)
|
||||||
- [aarnphm/whispercpp](https://github.com/aarnphm/whispercpp) (Pybind11)
|
- [aarnphm/whispercpp](https://github.com/aarnphm/whispercpp) (Pybind11)
|
||||||
|
- [abdeladim-s/pywhispercpp](https://github.com/abdeladim-s/pywhispercpp) (Pybind11)
|
||||||
- [x] R: [bnosac/audio.whisper](https://github.com/bnosac/audio.whisper)
|
- [x] R: [bnosac/audio.whisper](https://github.com/bnosac/audio.whisper)
|
||||||
- [x] Unity: [macoron/whisper.unity](https://github.com/Macoron/whisper.unity)
|
- [x] Unity: [macoron/whisper.unity](https://github.com/Macoron/whisper.unity)
|
||||||
|
|
||||||
@ -831,13 +607,12 @@ Some of the examples are even ported to run in the browser using WebAssembly. Ch
|
|||||||
|
|
||||||
| Example | Web | Description |
|
| Example | Web | Description |
|
||||||
| --------------------------------------------------- | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
|
| --------------------------------------------------- | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| [main](examples/main) | [whisper.wasm](examples/whisper.wasm) | Tool for translating and transcribing audio using Whisper |
|
| [whisper-cli](examples/cli) | [whisper.wasm](examples/whisper.wasm) | Tool for translating and transcribing audio using Whisper |
|
||||||
| [bench](examples/bench) | [bench.wasm](examples/bench.wasm) | Benchmark the performance of Whisper on your machine |
|
| [whisper-bench](examples/bench) | [bench.wasm](examples/bench.wasm) | Benchmark the performance of Whisper on your machine |
|
||||||
| [stream](examples/stream) | [stream.wasm](examples/stream.wasm) | Real-time transcription of raw microphone capture |
|
| [whisper-stream](examples/stream) | [stream.wasm](examples/stream.wasm) | Real-time transcription of raw microphone capture |
|
||||||
| [command](examples/command) | [command.wasm](examples/command.wasm) | Basic voice assistant example for receiving voice commands from the mic |
|
| [whisper-command](examples/command) | [command.wasm](examples/command.wasm) | Basic voice assistant example for receiving voice commands from the mic |
|
||||||
| [wchess](examples/wchess) | [wchess.wasm](examples/wchess) | Voice-controlled chess |
|
| [whisper-server](examples/server) | | HTTP transcription server with OAI-like API |
|
||||||
| [talk](examples/talk) | [talk.wasm](examples/talk.wasm) | Talk with a GPT-2 bot |
|
| [whisper-talk-llama](examples/talk-llama) | | Talk with a LLaMA bot |
|
||||||
| [talk-llama](examples/talk-llama) | | Talk with a LLaMA bot |
|
|
||||||
| [whisper.objc](examples/whisper.objc) | | iOS mobile application using whisper.cpp |
|
| [whisper.objc](examples/whisper.objc) | | iOS mobile application using whisper.cpp |
|
||||||
| [whisper.swiftui](examples/whisper.swiftui) | | SwiftUI iOS / macOS application using whisper.cpp |
|
| [whisper.swiftui](examples/whisper.swiftui) | | SwiftUI iOS / macOS application using whisper.cpp |
|
||||||
| [whisper.android](examples/whisper.android) | | Android mobile application using whisper.cpp |
|
| [whisper.android](examples/whisper.android) | | Android mobile application using whisper.cpp |
|
||||||
@ -845,7 +620,7 @@ Some of the examples are even ported to run in the browser using WebAssembly. Ch
|
|||||||
| [generate-karaoke.sh](examples/generate-karaoke.sh) | | Helper script to easily [generate a karaoke video](https://youtu.be/uj7hVta4blM) of raw audio capture |
|
| [generate-karaoke.sh](examples/generate-karaoke.sh) | | Helper script to easily [generate a karaoke video](https://youtu.be/uj7hVta4blM) of raw audio capture |
|
||||||
| [livestream.sh](examples/livestream.sh) | | [Livestream audio transcription](https://github.com/ggerganov/whisper.cpp/issues/185) |
|
| [livestream.sh](examples/livestream.sh) | | [Livestream audio transcription](https://github.com/ggerganov/whisper.cpp/issues/185) |
|
||||||
| [yt-wsp.sh](examples/yt-wsp.sh) | | Download + transcribe and/or translate any VOD [(original)](https://gist.github.com/DaniruKun/96f763ec1a037cc92fe1a059b643b818) |
|
| [yt-wsp.sh](examples/yt-wsp.sh) | | Download + transcribe and/or translate any VOD [(original)](https://gist.github.com/DaniruKun/96f763ec1a037cc92fe1a059b643b818) |
|
||||||
| [server](examples/server) | | HTTP transcription server with OAI-like API |
|
| [wchess](examples/wchess) | [wchess.wasm](examples/wchess) | Voice-controlled chess |
|
||||||
|
|
||||||
## [Discussions](https://github.com/ggerganov/whisper.cpp/discussions)
|
## [Discussions](https://github.com/ggerganov/whisper.cpp/discussions)
|
||||||
|
|
||||||
|
5
Sources/whisper/module.modulemap
Normal file
5
Sources/whisper/module.modulemap
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
module whisper [system] {
|
||||||
|
header "whisper.h"
|
||||||
|
link "whisper"
|
||||||
|
export *
|
||||||
|
}
|
4
Sources/whisper/whisper.h
Normal file
4
Sources/whisper/whisper.h
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <whisper.h>
|
||||||
|
|
@ -14,9 +14,14 @@ GGML_METAL_PATH_RESOURCES := $(abspath ../..)
|
|||||||
BUILD_DIR := build
|
BUILD_DIR := build
|
||||||
MODELS_DIR := models
|
MODELS_DIR := models
|
||||||
EXAMPLES_DIR := $(wildcard examples/*)
|
EXAMPLES_DIR := $(wildcard examples/*)
|
||||||
INCLUDE_PATH := $(abspath ../..)
|
INCLUDE_PATH := $(abspath ../../include):$(abspath ../../ggml/include)
|
||||||
LIBRARY_PATH := $(abspath ../..)
|
LIBRARY_PATH := $(abspath ../..)
|
||||||
|
|
||||||
|
ifeq ($(GGML_CUDA),1)
|
||||||
|
LIBRARY_PATH := $(LIBRARY_PATH):$(CUDA_PATH)/targets/$(UNAME_M)-linux/lib/
|
||||||
|
BUILD_FLAGS := -ldflags "-extldflags '-lcudart -lcuda -lcublas'"
|
||||||
|
endif
|
||||||
|
|
||||||
ifeq ($(UNAME_S),Darwin)
|
ifeq ($(UNAME_S),Darwin)
|
||||||
EXT_LDFLAGS := -framework Foundation -framework Metal -framework MetalKit
|
EXT_LDFLAGS := -framework Foundation -framework Metal -framework MetalKit
|
||||||
endif
|
endif
|
||||||
|
@ -62,6 +62,12 @@ This will compile a static `libwhisper.a` in a `build` folder, download a model
|
|||||||
make examples
|
make examples
|
||||||
```
|
```
|
||||||
|
|
||||||
|
To build using cuda support add `GGML_CUDA=1`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
GGML_CUDA=1 make examples
|
||||||
|
```
|
||||||
|
|
||||||
The examples are placed in the `build` directory. Once built, you can download all the models with the following command:
|
The examples are placed in the `build` directory. Once built, you can download all the models with the following command:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
@ -24,7 +24,7 @@ const (
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
// The models which will be downloaded, if no model is specified as an argument
|
// The models which will be downloaded, if no model is specified as an argument
|
||||||
modelNames = []string{"ggml-tiny.en", "ggml-tiny", "ggml-base.en", "ggml-base", "ggml-small.en", "ggml-small", "ggml-medium.en", "ggml-medium", "ggml-large-v1", "ggml-large-v2", "ggml-large-v3"}
|
modelNames = []string{"ggml-tiny.en", "ggml-tiny", "ggml-base.en", "ggml-base", "ggml-small.en", "ggml-small", "ggml-medium.en", "ggml-medium", "ggml-large-v1", "ggml-large-v2", "ggml-large-v3", "large-v3-turbo"}
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -1,10 +1,10 @@
|
|||||||
module github.com/ggerganov/whisper.cpp/bindings/go
|
module github.com/ggerganov/whisper.cpp/bindings/go
|
||||||
|
|
||||||
go 1.19
|
go 1.23
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/go-audio/wav v1.1.0
|
github.com/go-audio/wav v1.1.0
|
||||||
github.com/stretchr/testify v1.8.1
|
github.com/stretchr/testify v1.9.0
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
|
@ -1,4 +1,3 @@
|
|||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/go-audio/audio v1.0.0 h1:zS9vebldgbQqktK4H0lUqWrG8P0NxCJVqcj7ZpNnwd4=
|
github.com/go-audio/audio v1.0.0 h1:zS9vebldgbQqktK4H0lUqWrG8P0NxCJVqcj7ZpNnwd4=
|
||||||
@ -9,15 +8,9 @@ github.com/go-audio/wav v1.1.0 h1:jQgLtbqBzY7G+BM8fXF7AHUk1uHUviWS4X39d5rsL2g=
|
|||||||
github.com/go-audio/wav v1.1.0/go.mod h1:mpe9qfwbScEbkd8uybLuIpTgHyrISw/OTuvjUW2iGtE=
|
github.com/go-audio/wav v1.1.0/go.mod h1:mpe9qfwbScEbkd8uybLuIpTgHyrISw/OTuvjUW2iGtE=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
|
||||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
|
||||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
|
||||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
|
||||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
@ -119,6 +119,28 @@ func (p *Params) SetAudioCtx(n int) {
|
|||||||
p.audio_ctx = C.int(n)
|
p.audio_ctx = C.int(n)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *Params) SetMaxContext(n int) {
|
||||||
|
p.n_max_text_ctx = C.int(n)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Params) SetBeamSize(n int) {
|
||||||
|
p.beam_search.beam_size = C.int(n)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Params) SetEntropyThold(t float32) {
|
||||||
|
p.entropy_thold = C.float(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Params) SetTemperature(t float32) {
|
||||||
|
p.temperature = C.float(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sets the fallback temperature incrementation
|
||||||
|
// Pass -1.0 to disable this feature
|
||||||
|
func (p *Params) SetTemperatureFallback(t float32) {
|
||||||
|
p.temperature_inc = C.float(t)
|
||||||
|
}
|
||||||
|
|
||||||
// Set initial prompt
|
// Set initial prompt
|
||||||
func (p *Params) SetInitialPrompt(prompt string) {
|
func (p *Params) SetInitialPrompt(prompt string) {
|
||||||
p.initial_prompt = C.CString(prompt)
|
p.initial_prompt = C.CString(prompt)
|
||||||
@ -149,6 +171,10 @@ func (p *Params) String() string {
|
|||||||
str += fmt.Sprintf(" duration_ms=%d", p.duration_ms)
|
str += fmt.Sprintf(" duration_ms=%d", p.duration_ms)
|
||||||
str += fmt.Sprintf(" audio_ctx=%d", p.audio_ctx)
|
str += fmt.Sprintf(" audio_ctx=%d", p.audio_ctx)
|
||||||
str += fmt.Sprintf(" initial_prompt=%s", C.GoString(p.initial_prompt))
|
str += fmt.Sprintf(" initial_prompt=%s", C.GoString(p.initial_prompt))
|
||||||
|
str += fmt.Sprintf(" entropy_thold=%f", p.entropy_thold)
|
||||||
|
str += fmt.Sprintf(" temperature=%f", p.temperature)
|
||||||
|
str += fmt.Sprintf(" temperature_inc=%f", p.temperature_inc)
|
||||||
|
str += fmt.Sprintf(" beam_size=%d", p.beam_search.beam_size)
|
||||||
if p.translate {
|
if p.translate {
|
||||||
str += " translate"
|
str += " translate"
|
||||||
}
|
}
|
||||||
|
@ -125,6 +125,32 @@ func (context *context) SetAudioCtx(n uint) {
|
|||||||
context.params.SetAudioCtx(int(n))
|
context.params.SetAudioCtx(int(n))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Set maximum number of text context tokens to store
|
||||||
|
func (context *context) SetMaxContext(n int) {
|
||||||
|
context.params.SetMaxContext(n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set Beam Size
|
||||||
|
func (context *context) SetBeamSize(n int) {
|
||||||
|
context.params.SetBeamSize(n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set Entropy threshold
|
||||||
|
func (context *context) SetEntropyThold(t float32) {
|
||||||
|
context.params.SetEntropyThold(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set Temperature
|
||||||
|
func (context *context) SetTemperature(t float32) {
|
||||||
|
context.params.SetTemperature(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the fallback temperature incrementation
|
||||||
|
// Pass -1.0 to disable this feature
|
||||||
|
func (context *context) SetTemperatureFallback(t float32) {
|
||||||
|
context.params.SetTemperatureFallback(t)
|
||||||
|
}
|
||||||
|
|
||||||
// Set initial prompt
|
// Set initial prompt
|
||||||
func (context *context) SetInitialPrompt(prompt string) {
|
func (context *context) SetInitialPrompt(prompt string) {
|
||||||
context.params.SetInitialPrompt(prompt)
|
context.params.SetInitialPrompt(prompt)
|
||||||
|
@ -4,52 +4,90 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
// Packages
|
"github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
|
||||||
whisper "github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
|
"github.com/go-audio/wav"
|
||||||
assert "github.com/stretchr/testify/assert"
|
assert "github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
func TestSetLanguage(t *testing.T) {
|
||||||
ModelPath = "../../models/ggml-tiny.bin"
|
|
||||||
SamplePath = "../../samples/jfk.wav"
|
|
||||||
)
|
|
||||||
|
|
||||||
func Test_Whisper_000(t *testing.T) {
|
|
||||||
assert := assert.New(t)
|
assert := assert.New(t)
|
||||||
if _, err := os.Stat(ModelPath); os.IsNotExist(err) {
|
|
||||||
t.Skip("Skipping test, model not found:", ModelPath)
|
|
||||||
}
|
|
||||||
if _, err := os.Stat(SamplePath); os.IsNotExist(err) {
|
|
||||||
t.Skip("Skipping test, sample not found:", SamplePath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load model
|
|
||||||
model, err := whisper.New(ModelPath)
|
|
||||||
assert.NoError(err)
|
|
||||||
assert.NotNil(model)
|
|
||||||
assert.NoError(model.Close())
|
|
||||||
|
|
||||||
t.Log("languages=", model.Languages())
|
|
||||||
}
|
|
||||||
|
|
||||||
func Test_Whisper_001(t *testing.T) {
|
|
||||||
assert := assert.New(t)
|
|
||||||
if _, err := os.Stat(ModelPath); os.IsNotExist(err) {
|
|
||||||
t.Skip("Skipping test, model not found:", ModelPath)
|
|
||||||
}
|
|
||||||
if _, err := os.Stat(SamplePath); os.IsNotExist(err) {
|
|
||||||
t.Skip("Skipping test, sample not found:", SamplePath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load model
|
|
||||||
model, err := whisper.New(ModelPath)
|
model, err := whisper.New(ModelPath)
|
||||||
assert.NoError(err)
|
assert.NoError(err)
|
||||||
assert.NotNil(model)
|
assert.NotNil(model)
|
||||||
defer model.Close()
|
defer model.Close()
|
||||||
|
|
||||||
// Get context for decoding
|
context, err := model.NewContext()
|
||||||
ctx, err := model.NewContext()
|
|
||||||
assert.NoError(err)
|
assert.NoError(err)
|
||||||
assert.NotNil(ctx)
|
|
||||||
|
|
||||||
|
// This returns an error since
|
||||||
|
// the model 'models/ggml-small.en.bin'
|
||||||
|
// that is loaded is not multilingual
|
||||||
|
err = context.SetLanguage("en")
|
||||||
|
assert.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestContextModelIsMultilingual(t *testing.T) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
model, err := whisper.New(ModelPath)
|
||||||
|
assert.NoError(err)
|
||||||
|
assert.NotNil(model)
|
||||||
|
defer model.Close()
|
||||||
|
|
||||||
|
context, err := model.NewContext()
|
||||||
|
assert.NoError(err)
|
||||||
|
|
||||||
|
isMultilingual := context.IsMultilingual()
|
||||||
|
|
||||||
|
// This returns false since
|
||||||
|
// the model 'models/ggml-small.en.bin'
|
||||||
|
// that is loaded is not multilingual
|
||||||
|
assert.False(isMultilingual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLanguage(t *testing.T) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
model, err := whisper.New(ModelPath)
|
||||||
|
assert.NoError(err)
|
||||||
|
assert.NotNil(model)
|
||||||
|
defer model.Close()
|
||||||
|
|
||||||
|
context, err := model.NewContext()
|
||||||
|
assert.NoError(err)
|
||||||
|
|
||||||
|
// This always returns en since
|
||||||
|
// the model 'models/ggml-small.en.bin'
|
||||||
|
// that is loaded is not multilingual
|
||||||
|
expectedLanguage := "en"
|
||||||
|
actualLanguage := context.Language()
|
||||||
|
assert.Equal(expectedLanguage, actualLanguage)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProcess(t *testing.T) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
fh, err := os.Open(SamplePath)
|
||||||
|
assert.NoError(err)
|
||||||
|
defer fh.Close()
|
||||||
|
|
||||||
|
// Decode the WAV file - load the full buffer
|
||||||
|
dec := wav.NewDecoder(fh)
|
||||||
|
buf, err := dec.FullPCMBuffer()
|
||||||
|
assert.NoError(err)
|
||||||
|
assert.Equal(uint16(1), dec.NumChans)
|
||||||
|
|
||||||
|
data := buf.AsFloat32Buffer().Data
|
||||||
|
|
||||||
|
model, err := whisper.New(ModelPath)
|
||||||
|
assert.NoError(err)
|
||||||
|
assert.NotNil(model)
|
||||||
|
defer model.Close()
|
||||||
|
|
||||||
|
context, err := model.NewContext()
|
||||||
|
assert.NoError(err)
|
||||||
|
|
||||||
|
err = context.Process(data, nil, nil)
|
||||||
|
assert.NoError(err)
|
||||||
}
|
}
|
||||||
|
@ -38,17 +38,22 @@ type Context interface {
|
|||||||
IsMultilingual() bool // Return true if the model is multilingual.
|
IsMultilingual() bool // Return true if the model is multilingual.
|
||||||
Language() string // Get language
|
Language() string // Get language
|
||||||
|
|
||||||
SetOffset(time.Duration) // Set offset
|
SetOffset(time.Duration) // Set offset
|
||||||
SetDuration(time.Duration) // Set duration
|
SetDuration(time.Duration) // Set duration
|
||||||
SetThreads(uint) // Set number of threads to use
|
SetThreads(uint) // Set number of threads to use
|
||||||
SetSplitOnWord(bool) // Set split on word flag
|
SetSplitOnWord(bool) // Set split on word flag
|
||||||
SetTokenThreshold(float32) // Set timestamp token probability threshold
|
SetTokenThreshold(float32) // Set timestamp token probability threshold
|
||||||
SetTokenSumThreshold(float32) // Set timestamp token sum probability threshold
|
SetTokenSumThreshold(float32) // Set timestamp token sum probability threshold
|
||||||
SetMaxSegmentLength(uint) // Set max segment length in characters
|
SetMaxSegmentLength(uint) // Set max segment length in characters
|
||||||
SetTokenTimestamps(bool) // Set token timestamps flag
|
SetTokenTimestamps(bool) // Set token timestamps flag
|
||||||
SetMaxTokensPerSegment(uint) // Set max tokens per segment (0 = no limit)
|
SetMaxTokensPerSegment(uint) // Set max tokens per segment (0 = no limit)
|
||||||
SetAudioCtx(uint) // Set audio encoder context
|
SetAudioCtx(uint) // Set audio encoder context
|
||||||
SetInitialPrompt(prompt string) // Set initial prompt
|
SetMaxContext(n int) // Set maximum number of text context tokens to store
|
||||||
|
SetBeamSize(n int) // Set Beam Size
|
||||||
|
SetEntropyThold(t float32) // Set Entropy threshold
|
||||||
|
SetInitialPrompt(prompt string) // Set initial prompt
|
||||||
|
SetTemperature(t float32) // Set temperature
|
||||||
|
SetTemperatureFallback(t float32) // Set temperature incrementation
|
||||||
|
|
||||||
// Process mono audio data and return any errors.
|
// Process mono audio data and return any errors.
|
||||||
// If defined, newly generated segments are passed to the
|
// If defined, newly generated segments are passed to the
|
||||||
|
91
bindings/go/pkg/whisper/model_test.go
Normal file
91
bindings/go/pkg/whisper/model_test.go
Normal file
@ -0,0 +1,91 @@
|
|||||||
|
package whisper_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
|
||||||
|
assert "github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNew(t *testing.T) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
t.Run("valid model path", func(t *testing.T) {
|
||||||
|
model, err := whisper.New(ModelPath)
|
||||||
|
assert.NoError(err)
|
||||||
|
assert.NotNil(model)
|
||||||
|
defer model.Close()
|
||||||
|
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("invalid model path", func(t *testing.T) {
|
||||||
|
invalidModelPath := "invalid-model-path.bin"
|
||||||
|
model, err := whisper.New(invalidModelPath)
|
||||||
|
assert.Error(err)
|
||||||
|
assert.Nil(model)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestClose(t *testing.T) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
model, err := whisper.New(ModelPath)
|
||||||
|
assert.NoError(err)
|
||||||
|
assert.NotNil(model)
|
||||||
|
|
||||||
|
err = model.Close()
|
||||||
|
assert.NoError(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewContext(t *testing.T) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
model, err := whisper.New(ModelPath)
|
||||||
|
assert.NoError(err)
|
||||||
|
assert.NotNil(model)
|
||||||
|
defer model.Close()
|
||||||
|
|
||||||
|
context, err := model.NewContext()
|
||||||
|
assert.NoError(err)
|
||||||
|
assert.NotNil(context)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIsMultilingual(t *testing.T) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
model, err := whisper.New(ModelPath)
|
||||||
|
assert.NoError(err)
|
||||||
|
assert.NotNil(model)
|
||||||
|
defer model.Close()
|
||||||
|
|
||||||
|
isMultilingual := model.IsMultilingual()
|
||||||
|
|
||||||
|
// This returns false since
|
||||||
|
// the model 'models/ggml-small.en.bin'
|
||||||
|
// that is loaded is not multilingual
|
||||||
|
assert.False(isMultilingual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLanguages(t *testing.T) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
model, err := whisper.New(ModelPath)
|
||||||
|
assert.NoError(err)
|
||||||
|
assert.NotNil(model)
|
||||||
|
defer model.Close()
|
||||||
|
|
||||||
|
expectedLanguages := []string{
|
||||||
|
"en", "zh", "de", "es", "ru", "ko", "fr", "ja", "pt", "tr", "pl",
|
||||||
|
"ca", "nl", "ar", "sv", "it", "id", "hi", "fi", "vi", "he", "uk",
|
||||||
|
"el", "ms", "cs", "ro", "da", "hu", "ta", "no", "th", "ur", "hr",
|
||||||
|
"bg", "lt", "la", "mi", "ml", "cy", "sk", "te", "fa", "lv", "bn",
|
||||||
|
"sr", "az", "sl", "kn", "et", "mk", "br", "eu", "is", "hy", "ne",
|
||||||
|
"mn", "bs", "kk", "sq", "sw", "gl", "mr", "pa", "si", "km", "sn",
|
||||||
|
"yo", "so", "af", "oc", "ka", "be", "tg", "sd", "gu", "am", "yi",
|
||||||
|
"lo", "uz", "fo", "ht", "ps", "tk", "nn", "mt", "sa", "lb", "my",
|
||||||
|
"bo", "tl", "mg", "as", "tt", "haw", "ln", "ha", "ba", "jw", "su",
|
||||||
|
}
|
||||||
|
|
||||||
|
actualLanguages := model.Languages()
|
||||||
|
|
||||||
|
assert.Equal(expectedLanguages, actualLanguages)
|
||||||
|
}
|
6
bindings/go/pkg/whisper/util_test.go
Normal file
6
bindings/go/pkg/whisper/util_test.go
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
package whisper_test
|
||||||
|
|
||||||
|
const (
|
||||||
|
ModelPath = "../../models/ggml-small.en.bin"
|
||||||
|
SamplePath = "../../samples/jfk.wav"
|
||||||
|
)
|
@ -9,7 +9,7 @@ import (
|
|||||||
// CGO
|
// CGO
|
||||||
|
|
||||||
/*
|
/*
|
||||||
#cgo LDFLAGS: -lwhisper -lm -lstdc++
|
#cgo LDFLAGS: -lwhisper -lm -lstdc++ -fopenmp
|
||||||
#cgo darwin LDFLAGS: -framework Accelerate -framework Metal -framework Foundation -framework CoreGraphics
|
#cgo darwin LDFLAGS: -framework Accelerate -framework Metal -framework Foundation -framework CoreGraphics
|
||||||
#include <whisper.h>
|
#include <whisper.h>
|
||||||
#include <stdlib.h>
|
#include <stdlib.h>
|
||||||
|
Submodule bindings/ios deleted from a2085436c2
@ -67,5 +67,5 @@ copy /y ..\..\build\bin\Release\whisper.dll build\generated\resources\main\win32
|
|||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
The license for the Go bindings is the same as the license for the rest of the whisper.cpp project, which is the MIT License. See the `LICENSE` file for more details.
|
The license for the Java bindings is the same as the license for the rest of the whisper.cpp project, which is the MIT License. See the `LICENSE` file for more details.
|
||||||
|
|
||||||
|
@ -181,11 +181,11 @@ public class WhisperFullParams extends Structure {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/** Flag to suppress non-speech tokens. */
|
/** Flag to suppress non-speech tokens. */
|
||||||
public CBool suppress_non_speech_tokens;
|
public CBool suppress_nst;
|
||||||
|
|
||||||
/** Flag to suppress non-speech tokens. */
|
/** Flag to suppress non-speech tokens. */
|
||||||
public void suppressNonSpeechTokens(boolean enable) {
|
public void suppressNonSpeechTokens(boolean enable) {
|
||||||
suppress_non_speech_tokens = enable ? CBool.TRUE : CBool.FALSE;
|
suppress_nst = enable ? CBool.TRUE : CBool.FALSE;
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Initial decoding temperature. */
|
/** Initial decoding temperature. */
|
||||||
@ -315,7 +315,7 @@ public class WhisperFullParams extends Structure {
|
|||||||
"print_special", "print_progress", "print_realtime", "print_timestamps", "token_timestamps",
|
"print_special", "print_progress", "print_realtime", "print_timestamps", "token_timestamps",
|
||||||
"thold_pt", "thold_ptsum", "max_len", "split_on_word", "max_tokens", "audio_ctx",
|
"thold_pt", "thold_ptsum", "max_len", "split_on_word", "max_tokens", "audio_ctx",
|
||||||
"tdrz_enable", "suppress_regex", "initial_prompt", "prompt_tokens", "prompt_n_tokens", "language", "detect_language",
|
"tdrz_enable", "suppress_regex", "initial_prompt", "prompt_tokens", "prompt_n_tokens", "language", "detect_language",
|
||||||
"suppress_blank", "suppress_non_speech_tokens", "temperature", "max_initial_ts", "length_penalty",
|
"suppress_blank", "suppress_nst", "temperature", "max_initial_ts", "length_penalty",
|
||||||
"temperature_inc", "entropy_thold", "logprob_thold", "no_speech_thold", "greedy", "beam_search",
|
"temperature_inc", "entropy_thold", "logprob_thold", "no_speech_thold", "greedy", "beam_search",
|
||||||
"new_segment_callback", "new_segment_callback_user_data",
|
"new_segment_callback", "new_segment_callback_user_data",
|
||||||
"progress_callback", "progress_callback_user_data",
|
"progress_callback", "progress_callback_user_data",
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"name": "whisper.cpp",
|
"name": "whisper.cpp",
|
||||||
"version": "1.6.2",
|
"version": "1.7.4",
|
||||||
"description": "Whisper speech recognition",
|
"description": "Whisper speech recognition",
|
||||||
"main": "whisper.js",
|
"main": "whisper.js",
|
||||||
"scripts": {
|
"scripts": {
|
||||||
|
3
bindings/ruby/.gitignore
vendored
Normal file
3
bindings/ruby/.gitignore
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
LICENSE
|
||||||
|
pkg/
|
||||||
|
lib/whisper.*
|
243
bindings/ruby/README.md
Normal file
243
bindings/ruby/README.md
Normal file
@ -0,0 +1,243 @@
|
|||||||
|
whispercpp
|
||||||
|
==========
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
Ruby bindings for [whisper.cpp][], an interface of automatic speech recognition model.
|
||||||
|
|
||||||
|
Installation
|
||||||
|
------------
|
||||||
|
|
||||||
|
Install the gem and add to the application's Gemfile by executing:
|
||||||
|
|
||||||
|
$ bundle add whispercpp
|
||||||
|
|
||||||
|
If bundler is not being used to manage dependencies, install the gem by executing:
|
||||||
|
|
||||||
|
$ gem install whispercpp
|
||||||
|
|
||||||
|
Usage
|
||||||
|
-----
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
require "whisper"
|
||||||
|
|
||||||
|
whisper = Whisper::Context.new("base")
|
||||||
|
|
||||||
|
params = Whisper::Params.new
|
||||||
|
params.language = "en"
|
||||||
|
params.offset = 10_000
|
||||||
|
params.duration = 60_000
|
||||||
|
params.max_text_tokens = 300
|
||||||
|
params.translate = true
|
||||||
|
params.print_timestamps = false
|
||||||
|
params.initial_prompt = "Initial prompt here."
|
||||||
|
|
||||||
|
whisper.transcribe("path/to/audio.wav", params) do |whole_text|
|
||||||
|
puts whole_text
|
||||||
|
end
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
### Preparing model ###
|
||||||
|
|
||||||
|
Some models are prepared up-front:
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
base_en = Whisper::Model.pre_converted_models["base.en"]
|
||||||
|
whisper = Whisper::Context.new(base_en)
|
||||||
|
```
|
||||||
|
|
||||||
|
At first time you use a model, it is downloaded automatically. After that, downloaded cached file is used. To clear cache, call `#clear_cache`:
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
Whisper::Model.pre_converted_models["base"].clear_cache
|
||||||
|
```
|
||||||
|
|
||||||
|
You also can use shorthand for pre-converted models:
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
whisper = Whisper::Context.new("base.en")
|
||||||
|
```
|
||||||
|
|
||||||
|
You can see the list of prepared model names by `Whisper::Model.pre_converted_models.keys`:
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
puts Whisper::Model.pre_converted_models.keys
|
||||||
|
# tiny
|
||||||
|
# tiny.en
|
||||||
|
# tiny-q5_1
|
||||||
|
# tiny.en-q5_1
|
||||||
|
# tiny-q8_0
|
||||||
|
# base
|
||||||
|
# base.en
|
||||||
|
# base-q5_1
|
||||||
|
# base.en-q5_1
|
||||||
|
# base-q8_0
|
||||||
|
# :
|
||||||
|
# :
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also use local model files you prepared:
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
whisper = Whisper::Context.new("path/to/your/model.bin")
|
||||||
|
```
|
||||||
|
|
||||||
|
Or, you can download model files:
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
whisper = Whisper::Context.new("https://example.net/uri/of/your/model.bin")
|
||||||
|
# Or
|
||||||
|
whisper = Whisper::Context.new(URI("https://example.net/uri/of/your/model.bin"))
|
||||||
|
```
|
||||||
|
|
||||||
|
See [models][] page for details.
|
||||||
|
|
||||||
|
### Preparing audio file ###
|
||||||
|
|
||||||
|
Currently, whisper.cpp accepts only 16-bit WAV files.
|
||||||
|
|
||||||
|
API
|
||||||
|
---
|
||||||
|
|
||||||
|
### Segments ###
|
||||||
|
|
||||||
|
Once `Whisper::Context#transcribe` called, you can retrieve segments by `#each_segment`:
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
def format_time(time_ms)
|
||||||
|
sec, decimal_part = time_ms.divmod(1000)
|
||||||
|
min, sec = sec.divmod(60)
|
||||||
|
hour, min = min.divmod(60)
|
||||||
|
"%02d:%02d:%02d.%03d" % [hour, min, sec, decimal_part]
|
||||||
|
end
|
||||||
|
|
||||||
|
whisper.transcribe("path/to/audio.wav", params)
|
||||||
|
|
||||||
|
whisper.each_segment.with_index do |segment, index|
|
||||||
|
line = "[%{nth}: %{st} --> %{ed}] %{text}" % {
|
||||||
|
nth: index + 1,
|
||||||
|
st: format_time(segment.start_time),
|
||||||
|
ed: format_time(segment.end_time),
|
||||||
|
text: segment.text
|
||||||
|
}
|
||||||
|
line << " (speaker turned)" if segment.speaker_next_turn?
|
||||||
|
puts line
|
||||||
|
end
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also add hook to params called on new segment:
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
# Add hook before calling #transcribe
|
||||||
|
params.on_new_segment do |segment|
|
||||||
|
line = "[%{st} --> %{ed}] %{text}" % {
|
||||||
|
st: format_time(segment.start_time),
|
||||||
|
ed: format_time(segment.end_time),
|
||||||
|
text: segment.text
|
||||||
|
}
|
||||||
|
line << " (speaker turned)" if segment.speaker_next_turn?
|
||||||
|
puts line
|
||||||
|
end
|
||||||
|
|
||||||
|
whisper.transcribe("path/to/audio.wav", params)
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
### Models ###
|
||||||
|
|
||||||
|
You can see model information:
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
whisper = Whisper::Context.new("base")
|
||||||
|
model = whisper.model
|
||||||
|
|
||||||
|
model.n_vocab # => 51864
|
||||||
|
model.n_audio_ctx # => 1500
|
||||||
|
model.n_audio_state # => 512
|
||||||
|
model.n_audio_head # => 8
|
||||||
|
model.n_audio_layer # => 6
|
||||||
|
model.n_text_ctx # => 448
|
||||||
|
model.n_text_state # => 512
|
||||||
|
model.n_text_head # => 8
|
||||||
|
model.n_text_layer # => 6
|
||||||
|
model.n_mels # => 80
|
||||||
|
model.ftype # => 1
|
||||||
|
model.type # => "base"
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
### Logging ###
|
||||||
|
|
||||||
|
You can set log callback:
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
prefix = "[MyApp] "
|
||||||
|
log_callback = ->(level, buffer, user_data) {
|
||||||
|
case level
|
||||||
|
when Whisper::LOG_LEVEL_NONE
|
||||||
|
puts "#{user_data}none: #{buffer}"
|
||||||
|
when Whisper::LOG_LEVEL_INFO
|
||||||
|
puts "#{user_data}info: #{buffer}"
|
||||||
|
when Whisper::LOG_LEVEL_WARN
|
||||||
|
puts "#{user_data}warn: #{buffer}"
|
||||||
|
when Whisper::LOG_LEVEL_ERROR
|
||||||
|
puts "#{user_data}error: #{buffer}"
|
||||||
|
when Whisper::LOG_LEVEL_DEBUG
|
||||||
|
puts "#{user_data}debug: #{buffer}"
|
||||||
|
when Whisper::LOG_LEVEL_CONT
|
||||||
|
puts "#{user_data}same to previous: #{buffer}"
|
||||||
|
end
|
||||||
|
}
|
||||||
|
Whisper.log_set log_callback, prefix
|
||||||
|
```
|
||||||
|
|
||||||
|
Using this feature, you are also able to suppress log:
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
Whisper.log_set ->(level, buffer, user_data) {
|
||||||
|
# do nothing
|
||||||
|
}, nil
|
||||||
|
Whisper::Context.new("base")
|
||||||
|
```
|
||||||
|
|
||||||
|
### Low-level API to transcribe ###
|
||||||
|
|
||||||
|
You can also call `Whisper::Context#full` and `#full_parallel` with a Ruby array as samples. Although `#transcribe` with audio file path is recommended because it extracts PCM samples in C++ and is fast, `#full` and `#full_parallel` give you flexibility.
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
require "whisper"
|
||||||
|
require "wavefile"
|
||||||
|
|
||||||
|
reader = WaveFile::Reader.new("path/to/audio.wav", WaveFile::Format.new(:mono, :float, 16000))
|
||||||
|
samples = reader.enum_for(:each_buffer).map(&:samples).flatten
|
||||||
|
|
||||||
|
whisper = Whisper::Context.new("base")
|
||||||
|
whisper.full(Whisper::Params.new, samples)
|
||||||
|
whisper.each_segment do |segment|
|
||||||
|
puts segment.text
|
||||||
|
end
|
||||||
|
```
|
||||||
|
|
||||||
|
The second argument `samples` may be an array, an object with `length` and `each` method, or a MemoryView. If you can prepare audio data as C array and export it as a MemoryView, whispercpp accepts and works with it with zero copy.
|
||||||
|
|
||||||
|
Development
|
||||||
|
-----------
|
||||||
|
|
||||||
|
% git clone https://github.com/ggerganov/whisper.cpp.git
|
||||||
|
% cd whisper.cpp/bindings/ruby
|
||||||
|
% rake test
|
||||||
|
|
||||||
|
First call of `rake test` builds an extension and downloads a model for testing. After that, you add tests in `tests` directory and modify `ext/ruby_whisper.cpp`.
|
||||||
|
|
||||||
|
If something seems wrong on build, running `rake clean` solves some cases.
|
||||||
|
|
||||||
|
License
|
||||||
|
-------
|
||||||
|
|
||||||
|
The same to [whisper.cpp][].
|
||||||
|
|
||||||
|
[whisper.cpp]: https://github.com/ggerganov/whisper.cpp
|
||||||
|
[models]: https://github.com/ggerganov/whisper.cpp/tree/master/models
|
@ -1,12 +1,64 @@
|
|||||||
require 'rake/clean'
|
require 'rake/clean'
|
||||||
require 'rubygems/package'
|
require "bundler/gem_tasks"
|
||||||
|
require "rake/testtask"
|
||||||
|
require_relative "extsources"
|
||||||
|
|
||||||
desc 'Build gem'
|
SOURCES = FileList[]
|
||||||
task :package do
|
|
||||||
spec_source = File.read File.join(File.dirname(__FILE__),'whispercpp.gemspec')
|
EXTSOURCES.each do |src|
|
||||||
spec = nil
|
basename = src.pathmap("%f")
|
||||||
# see: http://gist.github.com/16215
|
dest = basename == "LICENSE" ? basename : src.pathmap("%{../..,ext}p")
|
||||||
Thread.new { spec = eval("#{spec_source}") }.join
|
dir = dest.pathmap("%d")
|
||||||
spec.validate
|
file src
|
||||||
Gem::Package.build(spec)
|
directory dir
|
||||||
|
file dest => [src, dir] do |t|
|
||||||
|
cp t.source, t.name
|
||||||
|
end
|
||||||
|
SOURCES.include dest
|
||||||
end
|
end
|
||||||
|
|
||||||
|
CLEAN.include SOURCES
|
||||||
|
CLEAN.include FileList["ext/*.o", "ext/*.metal", "ext/whisper.{so,bundle,dll}"]
|
||||||
|
|
||||||
|
task build: ["ext/Makefile", "ext/ruby_whisper.h", "ext/ruby_whisper.cpp", "whispercpp.gemspec"]
|
||||||
|
|
||||||
|
directory "pkg"
|
||||||
|
CLOBBER.include "pkg"
|
||||||
|
|
||||||
|
LIB_NAME = "whisper".ext(RbConfig::CONFIG["DLEXT"])
|
||||||
|
SO_FILE = File.join("ext", LIB_NAME)
|
||||||
|
LIB_FILE = File.join("lib", LIB_NAME)
|
||||||
|
|
||||||
|
file "ext/Makefile" => ["ext/extconf.rb", "ext/ruby_whisper.h", "ext/ruby_whisper.cpp"] + SOURCES do |t|
|
||||||
|
Dir.chdir "ext" do
|
||||||
|
ruby "extconf.rb"
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
file SO_FILE => "ext/Makefile" do |t|
|
||||||
|
Dir.chdir "ext" do
|
||||||
|
sh "make"
|
||||||
|
end
|
||||||
|
end
|
||||||
|
CLEAN.include SO_FILE
|
||||||
|
|
||||||
|
directory "lib"
|
||||||
|
file LIB_FILE => [SO_FILE, "lib"] do |t|
|
||||||
|
copy t.source, t.name
|
||||||
|
end
|
||||||
|
CLEAN.include LIB_FILE
|
||||||
|
|
||||||
|
Rake::TestTask.new do |t|
|
||||||
|
t.test_files = FileList["tests/test_*.rb"]
|
||||||
|
end
|
||||||
|
|
||||||
|
TEST_MEMORY_VIEW = "tests/jfk_reader/jfk_reader.#{RbConfig::CONFIG['DLEXT']}"
|
||||||
|
file TEST_MEMORY_VIEW => "tests/jfk_reader/jfk_reader.c" do |t|
|
||||||
|
Dir.chdir "tests/jfk_reader" do
|
||||||
|
ruby "extconf.rb"
|
||||||
|
sh "make"
|
||||||
|
end
|
||||||
|
end
|
||||||
|
CLEAN.include "tests/jfk_reader/jfk_reader.{o,#{RbConfig::CONFIG['DLEXT']}}"
|
||||||
|
|
||||||
|
task test: [LIB_FILE, TEST_MEMORY_VIEW]
|
||||||
|
18
bindings/ruby/ext/.gitignore
vendored
18
bindings/ruby/ext/.gitignore
vendored
@ -1,9 +1,13 @@
|
|||||||
Makefile
|
Makefile
|
||||||
ggml.c
|
whisper.so
|
||||||
ggml.h
|
|
||||||
ggml-alloc.c
|
|
||||||
ggml-alloc.h
|
|
||||||
whisper.bundle
|
whisper.bundle
|
||||||
whisper.cpp
|
whisper.dll
|
||||||
whisper.h
|
scripts/get-flags.mk
|
||||||
dr_wav.h
|
*.o
|
||||||
|
*.c
|
||||||
|
*.cpp
|
||||||
|
*.h
|
||||||
|
*.m
|
||||||
|
*.metal
|
||||||
|
!ruby_whisper.cpp
|
||||||
|
!ruby_whisper.h
|
||||||
|
9
bindings/ruby/ext/cpu.mk
Normal file
9
bindings/ruby/ext/cpu.mk
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
ggml/src/ggml-cpu/ggml-cpu-cpp.o: \
|
||||||
|
ggml/src/ggml-cpu/ggml-cpu.cpp \
|
||||||
|
ggml/include/ggml-backend.h \
|
||||||
|
ggml/include/ggml.h \
|
||||||
|
ggml/include/ggml-alloc.h \
|
||||||
|
ggml/src/ggml-backend-impl.h \
|
||||||
|
ggml/include/ggml-cpu.h \
|
||||||
|
ggml/src/ggml-impl.h
|
||||||
|
$(CXX) $(CXXFLAGS) -c $< -o $@
|
@ -1,23 +1,10 @@
|
|||||||
require 'mkmf'
|
require 'mkmf'
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','whisper.cpp')} .")
|
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','whisper.h')} .")
|
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','whisper-mel.hpp')} .")
|
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml.h')} .")
|
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml.c')} .")
|
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-impl.h')} .")
|
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-alloc.h')} .")
|
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-alloc.c')} .")
|
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-backend-impl.h')} .")
|
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-backend.h')} .")
|
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-backend.c')} .")
|
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-common.h')} .")
|
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-quants.h')} .")
|
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-quants.c')} .")
|
|
||||||
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','examples','dr_wav.h')} .")
|
|
||||||
|
|
||||||
|
|
||||||
# need to use c++ compiler flags
|
# need to use c++ compiler flags
|
||||||
$CXXFLAGS << ' -std=c++11'
|
$CXXFLAGS << ' -std=c++17'
|
||||||
|
|
||||||
|
$LDFLAGS << ' -lstdc++'
|
||||||
|
|
||||||
# Set to true when building binary gems
|
# Set to true when building binary gems
|
||||||
if enable_config('static-stdlib', false)
|
if enable_config('static-stdlib', false)
|
||||||
$LDFLAGS << ' -static-libgcc -static-libstdc++'
|
$LDFLAGS << ' -static-libgcc -static-libstdc++'
|
||||||
@ -28,4 +15,185 @@ if enable_config('march-tune-native', false)
|
|||||||
$CXXFLAGS << ' -march=native -mtune=native'
|
$CXXFLAGS << ' -march=native -mtune=native'
|
||||||
end
|
end
|
||||||
|
|
||||||
|
if ENV['WHISPER_METAL']
|
||||||
|
$GGML_METAL ||= true
|
||||||
|
$DEPRECATE_WARNING ||= true
|
||||||
|
end
|
||||||
|
|
||||||
|
$UNAME_S = `uname -s`.chomp
|
||||||
|
$UNAME_P = `uname -p`.chomp
|
||||||
|
$UNAME_M = `uname -m`.chomp
|
||||||
|
|
||||||
|
if $UNAME_S == 'Darwin'
|
||||||
|
unless ENV['GGML_NO_METAL']
|
||||||
|
$GGML_METAL ||= true
|
||||||
|
end
|
||||||
|
$GGML_NO_OPENMP ||= true
|
||||||
|
end
|
||||||
|
|
||||||
|
if $GGML_METAL
|
||||||
|
$GGML_METAL_EMBED_LIBRARY = true
|
||||||
|
end
|
||||||
|
|
||||||
|
$MK_CPPFLAGS = '-Iggml/include -Iggml/src -Iggml/src/ggml-cpu -Iinclude -Isrc -Iexamples'
|
||||||
|
$MK_CFLAGS = '-std=c11 -fPIC'
|
||||||
|
$MK_CXXFLAGS = '-std=c++17 -fPIC'
|
||||||
|
$MK_NVCCFLAGS = '-std=c++17'
|
||||||
|
$MK_LDFLAGS = ''
|
||||||
|
|
||||||
|
$OBJ_GGML = []
|
||||||
|
$OBJ_WHISPER = []
|
||||||
|
$OBJ_COMMON = []
|
||||||
|
$OBJ_SDL = []
|
||||||
|
|
||||||
|
$MK_CPPFLAGS << ' -D_XOPEN_SOURCE=600'
|
||||||
|
|
||||||
|
if $UNAME_S == 'Linux'
|
||||||
|
$MK_CPPFLAGS << ' -D_GNU_SOURCE'
|
||||||
|
end
|
||||||
|
|
||||||
|
if $UNAME_S == 'Darwin'
|
||||||
|
$MK_CPPFLAGS << ' -D_DARWIN_C_SOURCE'
|
||||||
|
end
|
||||||
|
|
||||||
|
if ENV['WHISPER_DEBUG']
|
||||||
|
$MK_CFLAGS << ' -O0 -g'
|
||||||
|
$MK_CXXFLAGS << ' -O0 -g'
|
||||||
|
$MK_LDFLAGS << ' -g'
|
||||||
|
$MK_NVCCFLAGS << ' -O0 -g'
|
||||||
|
else
|
||||||
|
$MK_CPPFLAGS << ' -DNDEBUG'
|
||||||
|
$MK_CFLAGS << ' -O3'
|
||||||
|
$MK_CXXFLAGS << ' -O3'
|
||||||
|
$MK_NVCCFLAGS << ' -O3'
|
||||||
|
end
|
||||||
|
|
||||||
|
$WARN_FLAGS =
|
||||||
|
' -Wall' <<
|
||||||
|
' -Wextra' <<
|
||||||
|
' -Wpedantic' <<
|
||||||
|
' -Wcast-qual' <<
|
||||||
|
' -Wno-unused-function'
|
||||||
|
|
||||||
|
$MK_CFLAGS <<
|
||||||
|
$WARN_FLAGS <<
|
||||||
|
' -Wshadow' <<
|
||||||
|
' -Wstrict-prototypes' <<
|
||||||
|
' -Wpointer-arith' <<
|
||||||
|
' -Wmissing-prototypes' <<
|
||||||
|
' -Werror=implicit-int' <<
|
||||||
|
' -Werror=implicit-function-declaration'
|
||||||
|
|
||||||
|
$MK_CXXFLAGS <<
|
||||||
|
$WARN_FLAGS <<
|
||||||
|
' -Wmissing-declarations' <<
|
||||||
|
' -Wmissing-noreturn'
|
||||||
|
|
||||||
|
unless `#{cc_command} #{$LDFLAGS} -Wl,-v 2>&1`.chomp.include? 'dyld-1015.7'
|
||||||
|
$MK_CPPFLAGS << ' -DHAVE_BUGGY_APPLE_LINKER'
|
||||||
|
end
|
||||||
|
|
||||||
|
if %w[Linux Darwin FreeBSD NetBSD OpenBSD Haiku].include? $UNAME_S
|
||||||
|
$MK_CFLAGS << ' -pthread'
|
||||||
|
$MK_CXXFLAGS << ' -pthread'
|
||||||
|
end
|
||||||
|
|
||||||
|
unless $_WIN32
|
||||||
|
$DSO_EXT = '.so'
|
||||||
|
else
|
||||||
|
$DSO_EXT = '.dll'
|
||||||
|
end
|
||||||
|
|
||||||
|
unless ENV['RISCV']
|
||||||
|
if %w[x86_64 i686 amd64].include? $UNAME_M
|
||||||
|
$HOST_CXXFLAGS ||= ''
|
||||||
|
|
||||||
|
$MK_CFLAGS << ' -march=native -mtune=native'
|
||||||
|
$HOST_CXXFLAGS << ' -march=native -mtune=native'
|
||||||
|
end
|
||||||
|
else
|
||||||
|
$MK_CFLAGS << ' -march=rv64gcv -mabi=lp64d'
|
||||||
|
$MK_CXXFLAGS << ' -march=rv64gcv -mabi=lp64d'
|
||||||
|
end
|
||||||
|
|
||||||
|
unless ENV['GGML_NO_ACCELERATE']
|
||||||
|
if $UNAME_S == 'Darwin'
|
||||||
|
$MK_CPPFLAGS << ' -DGGML_USE_ACCELERATE -DGGML_USE_BLAS -DGGML_BLAS_USE_ACCELERATE'
|
||||||
|
$MK_CPPFLAGS << ' -DACCELERATE_NEW_LAPACK'
|
||||||
|
$MK_CPPFLAGS << ' -DACCELERATE_LAPACK_ILP64'
|
||||||
|
$MK_LDFLAGS << ' -framework Accelerate'
|
||||||
|
$OBJ_GGML << 'ggml/src/ggml-blas/ggml-blas.o'
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
if ENV['GGML_OPENBLAS']
|
||||||
|
$MK_CPPFLAGS << " -DGGML_USE_BLAS #{`pkg-config --cflags-only-I openblas`.chomp}"
|
||||||
|
$MK_CFLAGS << " #{`pkg-config --cflags-only-other openblas)`.chomp}"
|
||||||
|
$MK_LDFLAGS << " #{`pkg-config --libs openblas`}"
|
||||||
|
$OBJ_GGML << 'ggml/src/ggml-blas/ggml-blas.o'
|
||||||
|
end
|
||||||
|
|
||||||
|
if ENV['GGML_OPENBLAS64']
|
||||||
|
$MK_CPPFLAGS << " -DGGML_USE_BLAS #{`pkg-config --cflags-only-I openblas64`.chomp}"
|
||||||
|
$MK_CFLAGS << " #{`pkg-config --cflags-only-other openblas64)`.chomp}"
|
||||||
|
$MK_LDFLAGS << " #{`pkg-config --libs openblas64`}"
|
||||||
|
$OBJ_GGML << 'ggml/src/ggml-blas/ggml-blas.o'
|
||||||
|
end
|
||||||
|
|
||||||
|
if $GGML_METAL
|
||||||
|
$MK_CPPFLAGS << ' -DGGML_USE_METAL'
|
||||||
|
$MK_LDFLAGS << ' -framework Foundation -framework Metal -framework MetalKit'
|
||||||
|
$OBJ_GGML << 'ggml/src/ggml-metal/ggml-metal.o'
|
||||||
|
|
||||||
|
if ENV['GGML_METAL_NDEBUG']
|
||||||
|
$MK_CPPFLAGS << ' -DGGML_METAL_NDEBUG'
|
||||||
|
end
|
||||||
|
|
||||||
|
if $GGML_METAL_EMBED_LIBRARY
|
||||||
|
$MK_CPPFLAGS << ' -DGGML_METAL_EMBED_LIBRARY'
|
||||||
|
$OBJ_GGML << 'ggml/src/ggml-metal/ggml-metal-embed.o'
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
$OBJ_GGML <<
|
||||||
|
'ggml/src/ggml.o' <<
|
||||||
|
'ggml/src/ggml-alloc.o' <<
|
||||||
|
'ggml/src/ggml-backend.o' <<
|
||||||
|
'ggml/src/ggml-backend-reg.o' <<
|
||||||
|
'ggml/src/ggml-opt.o' <<
|
||||||
|
'ggml/src/ggml-quants.o' <<
|
||||||
|
'ggml/src/ggml-threading.o' <<
|
||||||
|
'ggml/src/ggml-cpu/ggml-cpu.o' <<
|
||||||
|
'ggml/src/ggml-cpu/ggml-cpu-cpp.o' <<
|
||||||
|
'ggml/src/ggml-cpu/ggml-cpu-aarch64.o' <<
|
||||||
|
'ggml/src/ggml-cpu/ggml-cpu-hbm.o' <<
|
||||||
|
'ggml/src/ggml-cpu/ggml-cpu-quants.o' <<
|
||||||
|
'ggml/src/ggml-cpu/ggml-cpu-traits.o'
|
||||||
|
|
||||||
|
$OBJ_WHISPER <<
|
||||||
|
'src/whisper.o'
|
||||||
|
|
||||||
|
$objs = $OBJ_GGML + $OBJ_WHISPER + $OBJ_COMMON + $OBJ_SDL
|
||||||
|
$objs << "ruby_whisper.o"
|
||||||
|
|
||||||
|
$CPPFLAGS = "#{$MK_CPPFLAGS} #{$CPPFLAGS}"
|
||||||
|
$CFLAGS = "#{$CPPFLAGS} #{$MK_CFLAGS} #{$GF_CFLAGS} #{$CFLAGS}"
|
||||||
|
$BASE_CXXFLAGS = "#{$MK_CXXFLAGS} #{$CXXFLAGS}"
|
||||||
|
$CXXFLAGS = "#{$BASE_CXXFLAGS} #{$HOST_CXXFLAGS} #{$GF_CXXFLAGS} #{$CPPFLAGS}"
|
||||||
|
$NVCCFLAGS = "#{$MK_NVCCFLAGS} #{$NVCCFLAGS}"
|
||||||
|
$LDFLAGS = "#{$MK_LDFLAGS} #{$LDFLAGS}"
|
||||||
|
|
||||||
create_makefile('whisper')
|
create_makefile('whisper')
|
||||||
|
|
||||||
|
File.open 'Makefile', 'a' do |file|
|
||||||
|
file.puts 'include scripts/get-flags.mk'
|
||||||
|
file.puts 'include cpu.mk'
|
||||||
|
|
||||||
|
if $GGML_METAL
|
||||||
|
file.puts 'include metal.mk'
|
||||||
|
|
||||||
|
if $GGML_METAL_EMBED_LIBRARY
|
||||||
|
file.puts 'include metal-embed.mk'
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
@ -1,141 +0,0 @@
|
|||||||
#pragma once
|
|
||||||
|
|
||||||
// ggml-backend internal header
|
|
||||||
|
|
||||||
#include "ggml-backend.h"
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
extern "C" {
|
|
||||||
#endif
|
|
||||||
|
|
||||||
//
|
|
||||||
// Backend buffer
|
|
||||||
//
|
|
||||||
|
|
||||||
// buffer type
|
|
||||||
typedef void * ggml_backend_buffer_type_context_t;
|
|
||||||
|
|
||||||
struct ggml_backend_buffer_type_i {
|
|
||||||
const char * (*GGML_CALL get_name) (ggml_backend_buffer_type_t buft);
|
|
||||||
ggml_backend_buffer_t (*GGML_CALL alloc_buffer) (ggml_backend_buffer_type_t buft, size_t size);
|
|
||||||
size_t (*GGML_CALL get_alignment) (ggml_backend_buffer_type_t buft); // tensor alignment
|
|
||||||
size_t (*GGML_CALL get_max_size) (ggml_backend_buffer_type_t buft); // allocation max size
|
|
||||||
size_t (*GGML_CALL get_alloc_size) (ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding
|
|
||||||
bool (*GGML_CALL supports_backend)(ggml_backend_buffer_type_t buft, ggml_backend_t backend); // check if the buffer type is usable by the backend
|
|
||||||
// check if tensor data is in host memory
|
|
||||||
// should be equivalent to supports_backend(buft, ggml_backend_cpu_init())
|
|
||||||
bool (*GGML_CALL is_host) (ggml_backend_buffer_type_t buft);
|
|
||||||
};
|
|
||||||
|
|
||||||
struct ggml_backend_buffer_type {
|
|
||||||
struct ggml_backend_buffer_type_i iface;
|
|
||||||
ggml_backend_buffer_type_context_t context;
|
|
||||||
};
|
|
||||||
|
|
||||||
// buffer
|
|
||||||
typedef void * ggml_backend_buffer_context_t;
|
|
||||||
|
|
||||||
struct ggml_backend_buffer_i {
|
|
||||||
const char * (*GGML_CALL get_name) (ggml_backend_buffer_t buffer);
|
|
||||||
void (*GGML_CALL free_buffer)(ggml_backend_buffer_t buffer);
|
|
||||||
void * (*GGML_CALL get_base) (ggml_backend_buffer_t buffer);
|
|
||||||
void (*GGML_CALL init_tensor)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
|
|
||||||
void (*GGML_CALL set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
|
|
||||||
void (*GGML_CALL get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
|
|
||||||
bool (*GGML_CALL cpy_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst); // dst is in the buffer, src may be in any buffer
|
|
||||||
void (*GGML_CALL clear) (ggml_backend_buffer_t buffer, uint8_t value);
|
|
||||||
void (*GGML_CALL reset) (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
|
|
||||||
};
|
|
||||||
|
|
||||||
struct ggml_backend_buffer {
|
|
||||||
struct ggml_backend_buffer_i iface;
|
|
||||||
ggml_backend_buffer_type_t buft;
|
|
||||||
ggml_backend_buffer_context_t context;
|
|
||||||
size_t size;
|
|
||||||
enum ggml_backend_buffer_usage usage;
|
|
||||||
};
|
|
||||||
|
|
||||||
GGML_CALL ggml_backend_buffer_t ggml_backend_buffer_init(
|
|
||||||
ggml_backend_buffer_type_t buft,
|
|
||||||
struct ggml_backend_buffer_i iface,
|
|
||||||
ggml_backend_buffer_context_t context,
|
|
||||||
size_t size);
|
|
||||||
|
|
||||||
// do not use directly, use ggml_backend_tensor_copy instead
|
|
||||||
bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst);
|
|
||||||
|
|
||||||
// buffer that contains a collection of buffers
|
|
||||||
GGML_CALL ggml_backend_buffer_t ggml_backend_multi_buffer_alloc_buffer(ggml_backend_buffer_t * buffers, size_t n_buffers);
|
|
||||||
GGML_CALL bool ggml_backend_buffer_is_multi_buffer(ggml_backend_buffer_t buffer);
|
|
||||||
GGML_CALL void ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage);
|
|
||||||
|
|
||||||
//
|
|
||||||
// Backend
|
|
||||||
//
|
|
||||||
|
|
||||||
typedef void * ggml_backend_context_t;
|
|
||||||
|
|
||||||
struct ggml_backend_i {
|
|
||||||
const char * (*GGML_CALL get_name)(ggml_backend_t backend);
|
|
||||||
|
|
||||||
void (*GGML_CALL free)(ggml_backend_t backend);
|
|
||||||
|
|
||||||
// buffer allocation
|
|
||||||
ggml_backend_buffer_type_t (*GGML_CALL get_default_buffer_type)(ggml_backend_t backend);
|
|
||||||
|
|
||||||
// (optional) asynchronous tensor data access
|
|
||||||
void (*GGML_CALL set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
|
|
||||||
void (*GGML_CALL get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
|
|
||||||
bool (*GGML_CALL cpy_tensor_async)(ggml_backend_t backend_src, ggml_backend_t backend_dst, const struct ggml_tensor * src, struct ggml_tensor * dst);
|
|
||||||
|
|
||||||
// (optional) complete all pending operations
|
|
||||||
void (*GGML_CALL synchronize)(ggml_backend_t backend);
|
|
||||||
|
|
||||||
// compute graph with a plan (not used currently)
|
|
||||||
ggml_backend_graph_plan_t (*GGML_CALL graph_plan_create) (ggml_backend_t backend, const struct ggml_cgraph * cgraph);
|
|
||||||
void (*GGML_CALL graph_plan_free) (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
|
|
||||||
|
|
||||||
// compute graph with a plan
|
|
||||||
enum ggml_status (*GGML_CALL graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
|
|
||||||
// compute graph without a plan (async)
|
|
||||||
enum ggml_status (*GGML_CALL graph_compute) (ggml_backend_t backend, struct ggml_cgraph * cgraph);
|
|
||||||
|
|
||||||
// check if the backend supports an operation
|
|
||||||
bool (*GGML_CALL supports_op)(ggml_backend_t backend, const struct ggml_tensor * op);
|
|
||||||
|
|
||||||
// check if the backend wants to run an operation, even if the weights are allocated in a CPU buffer
|
|
||||||
// these should be expensive operations with large batch sizes that may benefit from running on this backend
|
|
||||||
// even if the weight has to be copied from the CPU temporarily
|
|
||||||
bool (*GGML_CALL offload_op)(ggml_backend_t backend, const struct ggml_tensor * op);
|
|
||||||
|
|
||||||
// (optional) event synchronization
|
|
||||||
ggml_backend_event_t (*GGML_CALL event_new) (ggml_backend_t backend);
|
|
||||||
void (*GGML_CALL event_free) (ggml_backend_event_t event);
|
|
||||||
void (*GGML_CALL event_record) (ggml_backend_event_t event);
|
|
||||||
void (*GGML_CALL event_wait) (ggml_backend_t backend, ggml_backend_event_t event);
|
|
||||||
void (*GGML_CALL event_synchronize) (ggml_backend_event_t event);
|
|
||||||
};
|
|
||||||
|
|
||||||
struct ggml_backend {
|
|
||||||
ggml_guid_t guid;
|
|
||||||
|
|
||||||
struct ggml_backend_i iface;
|
|
||||||
ggml_backend_context_t context;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct ggml_backend_event {
|
|
||||||
ggml_backend_t backend;
|
|
||||||
void * context;
|
|
||||||
};
|
|
||||||
|
|
||||||
//
|
|
||||||
// Backend registry
|
|
||||||
//
|
|
||||||
|
|
||||||
typedef ggml_backend_t (*GGML_CALL ggml_backend_init_fn)(const char * params, void * user_data);
|
|
||||||
|
|
||||||
GGML_CALL void ggml_backend_register(const char * name, ggml_backend_init_fn init_fn, ggml_backend_buffer_type_t default_buffer_type, void * user_data);
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
}
|
|
||||||
#endif
|
|
File diff suppressed because it is too large
Load Diff
@ -1,233 +0,0 @@
|
|||||||
#pragma once
|
|
||||||
|
|
||||||
#include "ggml.h"
|
|
||||||
#include "ggml-alloc.h"
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
extern "C" {
|
|
||||||
#endif
|
|
||||||
|
|
||||||
typedef struct ggml_backend_buffer_type * ggml_backend_buffer_type_t;
|
|
||||||
typedef struct ggml_backend_buffer * ggml_backend_buffer_t;
|
|
||||||
typedef struct ggml_backend_event * ggml_backend_event_t;
|
|
||||||
typedef struct ggml_backend * ggml_backend_t;
|
|
||||||
typedef void * ggml_backend_graph_plan_t;
|
|
||||||
|
|
||||||
//
|
|
||||||
// Backend buffer
|
|
||||||
//
|
|
||||||
|
|
||||||
// buffer type
|
|
||||||
GGML_API const char * ggml_backend_buft_name (ggml_backend_buffer_type_t buft);
|
|
||||||
GGML_API GGML_CALL ggml_backend_buffer_t ggml_backend_buft_alloc_buffer (ggml_backend_buffer_type_t buft, size_t size);
|
|
||||||
GGML_API size_t ggml_backend_buft_get_alignment (ggml_backend_buffer_type_t buft);
|
|
||||||
GGML_API size_t ggml_backend_buft_get_max_size (ggml_backend_buffer_type_t buft);
|
|
||||||
GGML_API GGML_CALL size_t ggml_backend_buft_get_alloc_size (ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor);
|
|
||||||
GGML_API bool ggml_backend_buft_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend);
|
|
||||||
GGML_API bool ggml_backend_buft_is_host (ggml_backend_buffer_type_t buft);
|
|
||||||
|
|
||||||
// buffer
|
|
||||||
enum ggml_backend_buffer_usage {
|
|
||||||
GGML_BACKEND_BUFFER_USAGE_ANY = 0,
|
|
||||||
GGML_BACKEND_BUFFER_USAGE_WEIGHTS = 1,
|
|
||||||
};
|
|
||||||
|
|
||||||
GGML_API const char * ggml_backend_buffer_name (ggml_backend_buffer_t buffer);
|
|
||||||
GGML_API void ggml_backend_buffer_free (ggml_backend_buffer_t buffer);
|
|
||||||
GGML_API void * ggml_backend_buffer_get_base (ggml_backend_buffer_t buffer);
|
|
||||||
GGML_API size_t ggml_backend_buffer_get_size (ggml_backend_buffer_t buffer);
|
|
||||||
GGML_API GGML_CALL void ggml_backend_buffer_init_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
|
|
||||||
GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer);
|
|
||||||
GGML_API size_t ggml_backend_buffer_get_max_size (ggml_backend_buffer_t buffer);
|
|
||||||
GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
|
|
||||||
GGML_API void ggml_backend_buffer_clear (ggml_backend_buffer_t buffer, uint8_t value);
|
|
||||||
GGML_API bool ggml_backend_buffer_is_host (ggml_backend_buffer_t buffer);
|
|
||||||
GGML_API void ggml_backend_buffer_set_usage (ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage);
|
|
||||||
GGML_API ggml_backend_buffer_type_t ggml_backend_buffer_get_type (ggml_backend_buffer_t buffer);
|
|
||||||
GGML_API void ggml_backend_buffer_reset (ggml_backend_buffer_t buffer);
|
|
||||||
|
|
||||||
//
|
|
||||||
// Backend
|
|
||||||
//
|
|
||||||
|
|
||||||
GGML_API ggml_guid_t ggml_backend_guid(ggml_backend_t backend);
|
|
||||||
GGML_API const char * ggml_backend_name(ggml_backend_t backend);
|
|
||||||
GGML_API void ggml_backend_free(ggml_backend_t backend);
|
|
||||||
|
|
||||||
GGML_API ggml_backend_buffer_type_t ggml_backend_get_default_buffer_type(ggml_backend_t backend);
|
|
||||||
GGML_API ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size);
|
|
||||||
GGML_API size_t ggml_backend_get_alignment(ggml_backend_t backend);
|
|
||||||
GGML_API size_t ggml_backend_get_max_size(ggml_backend_t backend);
|
|
||||||
|
|
||||||
GGML_API void ggml_backend_tensor_set_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
|
|
||||||
GGML_API void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
|
|
||||||
|
|
||||||
GGML_API GGML_CALL void ggml_backend_tensor_set( struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
|
|
||||||
GGML_API GGML_CALL void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
|
|
||||||
|
|
||||||
GGML_API void ggml_backend_synchronize(ggml_backend_t backend);
|
|
||||||
|
|
||||||
GGML_API ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph);
|
|
||||||
GGML_API void ggml_backend_graph_plan_free (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
|
|
||||||
|
|
||||||
GGML_API enum ggml_status ggml_backend_graph_plan_compute (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
|
|
||||||
GGML_API enum ggml_status ggml_backend_graph_compute (ggml_backend_t backend, struct ggml_cgraph * cgraph);
|
|
||||||
GGML_API enum ggml_status ggml_backend_graph_compute_async(ggml_backend_t backend, struct ggml_cgraph * cgraph);
|
|
||||||
GGML_API bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op);
|
|
||||||
GGML_API bool ggml_backend_offload_op(ggml_backend_t backend, const struct ggml_tensor * op);
|
|
||||||
|
|
||||||
// tensor copy between different backends
|
|
||||||
GGML_API void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst);
|
|
||||||
|
|
||||||
// asynchronous copy
|
|
||||||
// the copy is performed after all the currently queued operations in backend_src
|
|
||||||
// backend_dst will wait for the copy to complete before performing other operations
|
|
||||||
// automatic fallback to sync copy if async is not supported
|
|
||||||
GGML_API void ggml_backend_tensor_copy_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, struct ggml_tensor * src, struct ggml_tensor * dst);
|
|
||||||
|
|
||||||
// events
|
|
||||||
GGML_API ggml_backend_event_t ggml_backend_event_new (ggml_backend_t backend);
|
|
||||||
GGML_API void ggml_backend_event_free (ggml_backend_event_t event);
|
|
||||||
GGML_API void ggml_backend_event_record (ggml_backend_event_t event);
|
|
||||||
GGML_API void ggml_backend_event_synchronize(ggml_backend_event_t event);
|
|
||||||
GGML_API void ggml_backend_event_wait (ggml_backend_t backend, ggml_backend_event_t event); // wait async on event
|
|
||||||
|
|
||||||
//
|
|
||||||
// CPU backend
|
|
||||||
//
|
|
||||||
|
|
||||||
GGML_API ggml_backend_t ggml_backend_cpu_init(void);
|
|
||||||
|
|
||||||
GGML_API GGML_CALL bool ggml_backend_is_cpu (ggml_backend_t backend);
|
|
||||||
GGML_API void ggml_backend_cpu_set_n_threads (ggml_backend_t backend_cpu, int n_threads);
|
|
||||||
GGML_API void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback abort_callback, void * abort_callback_data);
|
|
||||||
|
|
||||||
// Create a backend buffer from an existing pointer
|
|
||||||
GGML_API GGML_CALL ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size);
|
|
||||||
|
|
||||||
GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void);
|
|
||||||
|
|
||||||
#ifdef GGML_USE_CPU_HBM
|
|
||||||
GGML_API ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
//
|
|
||||||
// Backend registry
|
|
||||||
//
|
|
||||||
|
|
||||||
// The backend registry is a registry of all the available backends, and allows initializing backends in a generic way
|
|
||||||
|
|
||||||
GGML_API size_t ggml_backend_reg_get_count(void);
|
|
||||||
GGML_API size_t ggml_backend_reg_find_by_name(const char * name);
|
|
||||||
GGML_API ggml_backend_t ggml_backend_reg_init_backend_from_str(const char * backend_str); // str is name[:params]
|
|
||||||
GGML_API const char * ggml_backend_reg_get_name(size_t i);
|
|
||||||
GGML_API ggml_backend_t ggml_backend_reg_init_backend(size_t i, const char * params); // params is backend-specific
|
|
||||||
GGML_API ggml_backend_buffer_type_t ggml_backend_reg_get_default_buffer_type(size_t i);
|
|
||||||
GGML_API ggml_backend_buffer_t ggml_backend_reg_alloc_buffer(size_t i, size_t size);
|
|
||||||
|
|
||||||
//
|
|
||||||
// Backend scheduler
|
|
||||||
//
|
|
||||||
|
|
||||||
// The backend scheduler allows for multiple backends to be used together
|
|
||||||
// Handles compute buffer allocation, assignment of tensors to backends, and copying of tensors between backends
|
|
||||||
// The backends are selected based on:
|
|
||||||
// - the backend that supports the operation
|
|
||||||
// - the location of the pre-allocated tensors (e.g. the weights)
|
|
||||||
/*
|
|
||||||
Example usage:
|
|
||||||
|
|
||||||
// operations that use tensors allocated in a buffer with USAGE_WEIGHTS will be assigned
|
|
||||||
// preferrably to run on the same backend as the buffer
|
|
||||||
ggml_backend_buffer_set_usage(buf_weights, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
|
|
||||||
|
|
||||||
sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, GGML_DEFAULT_GRAPH_SIZE, false);
|
|
||||||
|
|
||||||
// initialize buffers from a max size graph (optional)
|
|
||||||
reserve_graph = build_graph(sched, max_batch_size);
|
|
||||||
|
|
||||||
// manually assign nodes to a backend (optional, should not be needed in most cases)
|
|
||||||
struct ggml_tensor * node = ggml_mul_mat(ctx, ...);
|
|
||||||
ggml_backend_sched_set_tensor_backend(sched, node, backend_gpu);
|
|
||||||
|
|
||||||
ggml_backend_sched_reserve(sched, reserve_graph);
|
|
||||||
|
|
||||||
// compute
|
|
||||||
graph = build_graph(sched);
|
|
||||||
ggml_backend_sched_graph_compute(sched, graph);
|
|
||||||
|
|
||||||
// if there are graph inputs:
|
|
||||||
ggml_backend_sched_reset(sched);
|
|
||||||
ggml_backend_sched_alloc_graph(sched, graph);
|
|
||||||
ggml_backend_tensor_set(input_tensor, ...);
|
|
||||||
ggml_backend_sched_graph_compute(sched, graph);
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
|
|
||||||
struct ggml_backend_sched;
|
|
||||||
typedef struct ggml_backend_sched * ggml_backend_sched_t;
|
|
||||||
|
|
||||||
// when ask == true, the scheduler wants to know if the user wants to observe this node
|
|
||||||
// this allows the scheduler to batch nodes together in order to evaluate them in a single call
|
|
||||||
//
|
|
||||||
// when ask == false, the scheduler is passing the node tensor to the user for observation
|
|
||||||
// if the user returns false, the scheduler will cancel the graph compute
|
|
||||||
//
|
|
||||||
typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data);
|
|
||||||
|
|
||||||
// Initialize a backend scheduler
|
|
||||||
GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel);
|
|
||||||
GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched);
|
|
||||||
|
|
||||||
// Initialize backend buffers from a measure graph
|
|
||||||
GGML_API bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph);
|
|
||||||
|
|
||||||
// Get the number of splits of the last graph
|
|
||||||
GGML_API int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched);
|
|
||||||
GGML_API int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched);
|
|
||||||
|
|
||||||
GGML_API size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend);
|
|
||||||
|
|
||||||
GGML_API void ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend);
|
|
||||||
GGML_API ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node);
|
|
||||||
|
|
||||||
// Allocate and compute graph on the backend scheduler
|
|
||||||
GGML_API bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
|
|
||||||
GGML_API enum ggml_status ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
|
|
||||||
GGML_API enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
|
|
||||||
GGML_API void ggml_backend_sched_synchronize(ggml_backend_sched_t sched);
|
|
||||||
|
|
||||||
// Reset all assignments and allocators - must be called before changing the node backends
|
|
||||||
GGML_API void ggml_backend_sched_reset(ggml_backend_sched_t sched);
|
|
||||||
|
|
||||||
// Set a callback to be called for each resulting node during graph compute
|
|
||||||
GGML_API void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data);
|
|
||||||
|
|
||||||
//
|
|
||||||
// Utils
|
|
||||||
//
|
|
||||||
|
|
||||||
struct ggml_backend_graph_copy {
|
|
||||||
ggml_backend_buffer_t buffer;
|
|
||||||
struct ggml_context * ctx_allocated;
|
|
||||||
struct ggml_context * ctx_unallocated;
|
|
||||||
struct ggml_cgraph * graph;
|
|
||||||
};
|
|
||||||
|
|
||||||
// Copy a graph to a different backend
|
|
||||||
GGML_API struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph);
|
|
||||||
GGML_API void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy);
|
|
||||||
|
|
||||||
typedef bool (*GGML_CALL ggml_backend_eval_callback)(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data);
|
|
||||||
|
|
||||||
// Compare the output of two backends
|
|
||||||
GGML_API bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data);
|
|
||||||
|
|
||||||
// Tensor initialization
|
|
||||||
GGML_API void ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr);
|
|
||||||
GGML_API void ggml_backend_view_init(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
|
|
||||||
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
}
|
|
||||||
#endif
|
|
@ -1,43 +0,0 @@
|
|||||||
#pragma once
|
|
||||||
|
|
||||||
#include "ggml.h"
|
|
||||||
#include "ggml-backend.h"
|
|
||||||
|
|
||||||
#ifdef GGML_USE_HIPBLAS
|
|
||||||
#define GGML_CUDA_NAME "ROCm"
|
|
||||||
#define GGML_CUBLAS_NAME "hipBLAS"
|
|
||||||
#else
|
|
||||||
#define GGML_CUDA_NAME "CUDA"
|
|
||||||
#define GGML_CUBLAS_NAME "cuBLAS"
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
extern "C" {
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#define GGML_CUDA_MAX_DEVICES 16
|
|
||||||
|
|
||||||
// backend API
|
|
||||||
GGML_API GGML_CALL ggml_backend_t ggml_backend_cuda_init(int device);
|
|
||||||
|
|
||||||
GGML_API GGML_CALL bool ggml_backend_is_cuda(ggml_backend_t backend);
|
|
||||||
|
|
||||||
// device buffer
|
|
||||||
GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device);
|
|
||||||
|
|
||||||
// split tensor buffer that splits matrices by rows across multiple devices
|
|
||||||
GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(const float * tensor_split);
|
|
||||||
|
|
||||||
// pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
|
|
||||||
GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type(void);
|
|
||||||
|
|
||||||
GGML_API GGML_CALL int ggml_backend_cuda_get_device_count(void);
|
|
||||||
GGML_API GGML_CALL void ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size);
|
|
||||||
GGML_API GGML_CALL void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total);
|
|
||||||
|
|
||||||
GGML_API GGML_CALL bool ggml_backend_cuda_register_host_buffer(void * buffer, size_t size);
|
|
||||||
GGML_API GGML_CALL void ggml_backend_cuda_unregister_host_buffer(void * buffer);
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
}
|
|
||||||
#endif
|
|
@ -1,272 +0,0 @@
|
|||||||
#pragma once
|
|
||||||
|
|
||||||
#include "ggml.h"
|
|
||||||
|
|
||||||
// GGML internal header
|
|
||||||
|
|
||||||
#include <assert.h>
|
|
||||||
#include <stdlib.h> // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
|
|
||||||
#include <stddef.h>
|
|
||||||
#include <stdbool.h>
|
|
||||||
#include <string.h> // memcpy
|
|
||||||
#include <math.h> // fabsf
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
extern "C" {
|
|
||||||
#endif
|
|
||||||
|
|
||||||
// static_assert should be a #define, but if it's not,
|
|
||||||
// fall back to the _Static_assert C11 keyword.
|
|
||||||
// if C99 - static_assert is noop
|
|
||||||
// ref: https://stackoverflow.com/a/53923785/4039976
|
|
||||||
#ifndef __cplusplus
|
|
||||||
#ifndef static_assert
|
|
||||||
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
|
|
||||||
#define static_assert(cond, msg) _Static_assert(cond, msg)
|
|
||||||
#else
|
|
||||||
#define static_assert(cond, msg) struct global_scope_noop_trick
|
|
||||||
#endif
|
|
||||||
#endif
|
|
||||||
#endif
|
|
||||||
|
|
||||||
// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
|
|
||||||
#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__))
|
|
||||||
#ifndef __FMA__
|
|
||||||
#define __FMA__
|
|
||||||
#endif
|
|
||||||
#ifndef __F16C__
|
|
||||||
#define __F16C__
|
|
||||||
#endif
|
|
||||||
#endif
|
|
||||||
|
|
||||||
// __SSE3__ and __SSSE3__ are not defined in MSVC, but SSE3/SSSE3 are present when AVX/AVX2/AVX512 are available
|
|
||||||
#if defined(_MSC_VER) && (defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__))
|
|
||||||
#ifndef __SSE3__
|
|
||||||
#define __SSE3__
|
|
||||||
#endif
|
|
||||||
#ifndef __SSSE3__
|
|
||||||
#define __SSSE3__
|
|
||||||
#endif
|
|
||||||
#endif
|
|
||||||
|
|
||||||
// 16-bit float
|
|
||||||
// on Arm, we use __fp16
|
|
||||||
// on x86, we use uint16_t
|
|
||||||
#if defined(__ARM_NEON) && !defined(_MSC_VER)
|
|
||||||
|
|
||||||
// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
|
|
||||||
//
|
|
||||||
// $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
|
|
||||||
//
|
|
||||||
#include <arm_neon.h>
|
|
||||||
|
|
||||||
typedef __fp16 ggml_fp16_internal_t;
|
|
||||||
|
|
||||||
#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
|
|
||||||
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
|
|
||||||
|
|
||||||
#define GGML_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
|
|
||||||
|
|
||||||
static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
|
|
||||||
ggml_fp16_internal_t tmp;
|
|
||||||
memcpy(&tmp, &h, sizeof(ggml_fp16_t));
|
|
||||||
return (float)tmp;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
|
|
||||||
ggml_fp16_t res;
|
|
||||||
ggml_fp16_internal_t tmp = f;
|
|
||||||
memcpy(&res, &tmp, sizeof(ggml_fp16_t));
|
|
||||||
return res;
|
|
||||||
}
|
|
||||||
|
|
||||||
#else
|
|
||||||
|
|
||||||
typedef uint16_t ggml_fp16_internal_t;
|
|
||||||
|
|
||||||
#ifdef __wasm_simd128__
|
|
||||||
#include <wasm_simd128.h>
|
|
||||||
#else
|
|
||||||
#ifdef __POWER9_VECTOR__
|
|
||||||
#include <altivec.h>
|
|
||||||
#undef bool
|
|
||||||
#define bool _Bool
|
|
||||||
#else
|
|
||||||
#if defined(_MSC_VER) || defined(__MINGW32__)
|
|
||||||
#include <intrin.h>
|
|
||||||
#else
|
|
||||||
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
|
|
||||||
#if !defined(__riscv)
|
|
||||||
#include <immintrin.h>
|
|
||||||
#endif
|
|
||||||
#endif
|
|
||||||
#endif
|
|
||||||
#endif
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef __riscv_v_intrinsic
|
|
||||||
#include <riscv_vector.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef __F16C__
|
|
||||||
|
|
||||||
#ifdef _MSC_VER
|
|
||||||
#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
|
|
||||||
#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
|
|
||||||
#else
|
|
||||||
#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
|
|
||||||
#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#elif defined(__POWER9_VECTOR__)
|
|
||||||
|
|
||||||
#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
|
|
||||||
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
|
|
||||||
/* the inline asm below is about 12% faster than the lookup method */
|
|
||||||
#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
|
|
||||||
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
|
|
||||||
|
|
||||||
static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
|
|
||||||
register float f;
|
|
||||||
register double d;
|
|
||||||
__asm__(
|
|
||||||
"mtfprd %0,%2\n"
|
|
||||||
"xscvhpdp %0,%0\n"
|
|
||||||
"frsp %1,%0\n" :
|
|
||||||
/* temp */ "=d"(d),
|
|
||||||
/* out */ "=f"(f):
|
|
||||||
/* in */ "r"(h));
|
|
||||||
return f;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
|
|
||||||
register double d;
|
|
||||||
register ggml_fp16_t r;
|
|
||||||
__asm__( /* xscvdphp can work on double or single precision */
|
|
||||||
"xscvdphp %0,%2\n"
|
|
||||||
"mffprd %1,%0\n" :
|
|
||||||
/* temp */ "=d"(d),
|
|
||||||
/* out */ "=r"(r):
|
|
||||||
/* in */ "f"(f));
|
|
||||||
return r;
|
|
||||||
}
|
|
||||||
|
|
||||||
#else
|
|
||||||
|
|
||||||
// FP16 <-> FP32
|
|
||||||
// ref: https://github.com/Maratyszcza/FP16
|
|
||||||
|
|
||||||
static inline float fp32_from_bits(uint32_t w) {
|
|
||||||
union {
|
|
||||||
uint32_t as_bits;
|
|
||||||
float as_value;
|
|
||||||
} fp32;
|
|
||||||
fp32.as_bits = w;
|
|
||||||
return fp32.as_value;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline uint32_t fp32_to_bits(float f) {
|
|
||||||
union {
|
|
||||||
float as_value;
|
|
||||||
uint32_t as_bits;
|
|
||||||
} fp32;
|
|
||||||
fp32.as_value = f;
|
|
||||||
return fp32.as_bits;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
|
|
||||||
const uint32_t w = (uint32_t) h << 16;
|
|
||||||
const uint32_t sign = w & UINT32_C(0x80000000);
|
|
||||||
const uint32_t two_w = w + w;
|
|
||||||
|
|
||||||
const uint32_t exp_offset = UINT32_C(0xE0) << 23;
|
|
||||||
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
|
|
||||||
const float exp_scale = 0x1.0p-112f;
|
|
||||||
#else
|
|
||||||
const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
|
|
||||||
#endif
|
|
||||||
const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
|
|
||||||
|
|
||||||
const uint32_t magic_mask = UINT32_C(126) << 23;
|
|
||||||
const float magic_bias = 0.5f;
|
|
||||||
const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;
|
|
||||||
|
|
||||||
const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
|
|
||||||
const uint32_t result = sign |
|
|
||||||
(two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
|
|
||||||
return fp32_from_bits(result);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
|
|
||||||
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
|
|
||||||
const float scale_to_inf = 0x1.0p+112f;
|
|
||||||
const float scale_to_zero = 0x1.0p-110f;
|
|
||||||
#else
|
|
||||||
const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
|
|
||||||
const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
|
|
||||||
#endif
|
|
||||||
float base = (fabsf(f) * scale_to_inf) * scale_to_zero;
|
|
||||||
|
|
||||||
const uint32_t w = fp32_to_bits(f);
|
|
||||||
const uint32_t shl1_w = w + w;
|
|
||||||
const uint32_t sign = w & UINT32_C(0x80000000);
|
|
||||||
uint32_t bias = shl1_w & UINT32_C(0xFF000000);
|
|
||||||
if (bias < UINT32_C(0x71000000)) {
|
|
||||||
bias = UINT32_C(0x71000000);
|
|
||||||
}
|
|
||||||
|
|
||||||
base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
|
|
||||||
const uint32_t bits = fp32_to_bits(base);
|
|
||||||
const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
|
|
||||||
const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
|
|
||||||
const uint32_t nonsign = exp_bits + mantissa_bits;
|
|
||||||
return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
|
|
||||||
}
|
|
||||||
|
|
||||||
#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
|
|
||||||
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
|
|
||||||
|
|
||||||
#endif // __F16C__
|
|
||||||
|
|
||||||
#endif // __ARM_NEON
|
|
||||||
|
|
||||||
// precomputed f32 table for f16 (256 KB)
|
|
||||||
// defined in ggml.c, initialized in ggml_init()
|
|
||||||
extern float ggml_table_f32_f16[1 << 16];
|
|
||||||
|
|
||||||
// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32,
|
|
||||||
// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON.
|
|
||||||
// This is also true for POWER9.
|
|
||||||
#if !defined(GGML_FP16_TO_FP32)
|
|
||||||
inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
|
|
||||||
uint16_t s;
|
|
||||||
memcpy(&s, &f, sizeof(uint16_t));
|
|
||||||
return ggml_table_f32_f16[s];
|
|
||||||
}
|
|
||||||
|
|
||||||
#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if !defined(GGML_FP32_TO_FP16)
|
|
||||||
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#define GGML_HASHTABLE_FULL ((size_t)-1)
|
|
||||||
#define GGML_HASHTABLE_ALREADY_EXISTS ((size_t)-2)
|
|
||||||
|
|
||||||
struct ggml_hash_set ggml_hash_set_new(size_t size);
|
|
||||||
|
|
||||||
bool ggml_hash_contains (const struct ggml_hash_set hash_set, struct ggml_tensor * key);
|
|
||||||
|
|
||||||
// returns GGML_HASHTABLE_FULL if table is full, otherwise the current index of the key or where it should be inserted
|
|
||||||
size_t ggml_hash_find (const struct ggml_hash_set hash_set, struct ggml_tensor * key);
|
|
||||||
|
|
||||||
// returns GGML_HASHTABLE_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
|
|
||||||
size_t ggml_hash_insert ( struct ggml_hash_set hash_set, struct ggml_tensor * key);
|
|
||||||
|
|
||||||
// return index, asserts if table is full
|
|
||||||
size_t ggml_hash_find_or_insert( struct ggml_hash_set hash_set, struct ggml_tensor * key);
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
}
|
|
||||||
#endif
|
|
@ -1,66 +0,0 @@
|
|||||||
// An interface allowing to compute ggml_cgraph with Metal
|
|
||||||
//
|
|
||||||
// This is a fully functional interface that extends ggml with GPU support for Apple devices.
|
|
||||||
// A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, OpenCL, etc.)
|
|
||||||
//
|
|
||||||
// How it works?
|
|
||||||
//
|
|
||||||
// As long as your program can create and evaluate a ggml_cgraph on the CPU, you can use this
|
|
||||||
// interface to evaluate the same graph on the GPU. Instead of using ggml_graph_compute(), you
|
|
||||||
// use ggml_metal_graph_compute() (or ggml_vulkan_graph_compute(), etc.)
|
|
||||||
//
|
|
||||||
// You only need to make sure that all memory buffers that you used during the graph creation
|
|
||||||
// are mapped to the device memory with the ggml_metal_add_buffer() function. This mapping is
|
|
||||||
// used during the graph evaluation to determine the arguments of the compute kernels.
|
|
||||||
//
|
|
||||||
// Synchronization between device and host memory (for example for input and output tensors)
|
|
||||||
// is done with the ggml_metal_set_tensor() and ggml_metal_get_tensor() functions.
|
|
||||||
//
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include "ggml.h"
|
|
||||||
#include "ggml-backend.h"
|
|
||||||
|
|
||||||
#include <stddef.h>
|
|
||||||
#include <stdbool.h>
|
|
||||||
|
|
||||||
// max memory buffers that can be mapped to the device
|
|
||||||
#define GGML_METAL_MAX_BUFFERS 64
|
|
||||||
|
|
||||||
struct ggml_tensor;
|
|
||||||
struct ggml_cgraph;
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
extern "C" {
|
|
||||||
#endif
|
|
||||||
|
|
||||||
//
|
|
||||||
// backend API
|
|
||||||
// user-code should use only these functions
|
|
||||||
//
|
|
||||||
|
|
||||||
GGML_API void ggml_backend_metal_log_set_callback(ggml_log_callback log_callback, void * user_data);
|
|
||||||
|
|
||||||
GGML_API ggml_backend_t ggml_backend_metal_init(void);
|
|
||||||
|
|
||||||
GGML_API bool ggml_backend_is_metal(ggml_backend_t backend);
|
|
||||||
|
|
||||||
GGML_API GGML_CALL ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size);
|
|
||||||
|
|
||||||
GGML_API void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb);
|
|
||||||
|
|
||||||
GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void);
|
|
||||||
|
|
||||||
// helper to check if the device supports a specific family
|
|
||||||
// ideally, the user code should be doing these checks
|
|
||||||
// ref: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
|
|
||||||
GGML_API bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family);
|
|
||||||
|
|
||||||
// capture all command buffers committed the next time `ggml_backend_graph_compute` is called
|
|
||||||
GGML_API void ggml_backend_metal_capture_next_compute(ggml_backend_t backend);
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
@ -1,36 +0,0 @@
|
|||||||
#pragma once
|
|
||||||
|
|
||||||
#include "ggml.h"
|
|
||||||
#include "ggml-backend.h"
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
extern "C" {
|
|
||||||
#endif
|
|
||||||
|
|
||||||
GGML_API void ggml_cl_init(void);
|
|
||||||
|
|
||||||
GGML_API void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
|
|
||||||
GGML_API void ggml_cl_add(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
|
|
||||||
GGML_API bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, const struct ggml_tensor * dst);
|
|
||||||
GGML_API size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
|
|
||||||
GGML_API void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);
|
|
||||||
|
|
||||||
// GGML_API void * ggml_cl_host_malloc(size_t size);
|
|
||||||
// GGML_API void ggml_cl_host_free(void * ptr);
|
|
||||||
|
|
||||||
GGML_API void ggml_cl_free_data(const struct ggml_tensor* tensor);
|
|
||||||
|
|
||||||
GGML_API void ggml_cl_transform_tensor(void * data, struct ggml_tensor * tensor);
|
|
||||||
|
|
||||||
// backend API
|
|
||||||
|
|
||||||
// GGML_API ggml_backend_t ggml_backend_opencl_init(void);
|
|
||||||
|
|
||||||
// GGML_API bool ggml_backend_is_opencl(ggml_backend_t backend);
|
|
||||||
|
|
||||||
GGML_API ggml_backend_buffer_type_t ggml_backend_opencl_buffer_type(void);
|
|
||||||
// GGML_API ggml_backend_buffer_type_t ggml_backend_opencl_host_buffer_type(void);
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
}
|
|
||||||
#endif
|
|
File diff suppressed because it is too large
Load Diff
@ -1,133 +0,0 @@
|
|||||||
#pragma once
|
|
||||||
|
|
||||||
#define GGML_COMMON_DECL_C
|
|
||||||
#include "ggml-common.h"
|
|
||||||
|
|
||||||
#include "ggml.h"
|
|
||||||
|
|
||||||
// GGML internal header
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
extern "C" {
|
|
||||||
#endif
|
|
||||||
|
|
||||||
// Quantization
|
|
||||||
void quantize_row_q4_0_reference(const float * GGML_RESTRICT x, block_q4_0 * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q4_1_reference(const float * GGML_RESTRICT x, block_q4_1 * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q5_0_reference(const float * GGML_RESTRICT x, block_q5_0 * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q5_1_reference(const float * GGML_RESTRICT x, block_q5_1 * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q8_0_reference(const float * GGML_RESTRICT x, block_q8_0 * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q8_1_reference(const float * GGML_RESTRICT x, block_q8_1 * GGML_RESTRICT y, int64_t k);
|
|
||||||
|
|
||||||
void quantize_row_q2_K_reference(const float * GGML_RESTRICT x, block_q2_K * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q3_K_reference(const float * GGML_RESTRICT x, block_q3_K * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q4_K_reference(const float * GGML_RESTRICT x, block_q4_K * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q5_K_reference(const float * GGML_RESTRICT x, block_q5_K * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q6_K_reference(const float * GGML_RESTRICT x, block_q6_K * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q8_K_reference(const float * GGML_RESTRICT x, block_q8_K * GGML_RESTRICT y, int64_t k);
|
|
||||||
|
|
||||||
void quantize_row_iq3_xxs_reference(const float * GGML_RESTRICT x, block_iq3_xxs * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_iq4_nl_reference (const float * GGML_RESTRICT x, block_iq4_nl * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_iq4_xs_reference (const float * GGML_RESTRICT x, block_iq4_xs * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_iq3_s_reference (const float * GGML_RESTRICT x, block_iq3_s * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_iq2_s_reference (const float * GGML_RESTRICT x, block_iq2_s * GGML_RESTRICT y, int64_t k);
|
|
||||||
|
|
||||||
void quantize_row_q4_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q4_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q5_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q5_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
|
||||||
|
|
||||||
void quantize_row_q2_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q3_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q4_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q5_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q6_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
|
||||||
|
|
||||||
void quantize_row_iq3_xxs(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_iq4_nl (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_iq4_xs (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_iq3_s (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
|
||||||
void quantize_row_iq2_s (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
|
||||||
|
|
||||||
// Dequantization
|
|
||||||
void dequantize_row_q4_0(const block_q4_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
void dequantize_row_q4_1(const block_q4_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
void dequantize_row_q5_0(const block_q5_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
void dequantize_row_q5_1(const block_q5_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
void dequantize_row_q8_0(const block_q8_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
//void dequantize_row_q8_1(const block_q8_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
|
|
||||||
void dequantize_row_q2_K(const block_q2_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
void dequantize_row_q3_K(const block_q3_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
void dequantize_row_q4_K(const block_q4_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
void dequantize_row_q5_K(const block_q5_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
void dequantize_row_q6_K(const block_q6_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
void dequantize_row_q8_K(const block_q8_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
|
|
||||||
void dequantize_row_iq2_xxs(const block_iq2_xxs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
void dequantize_row_iq2_xs (const block_iq2_xs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
void dequantize_row_iq2_s (const block_iq2_s * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
void dequantize_row_iq3_xxs(const block_iq3_xxs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
void dequantize_row_iq1_s (const block_iq1_s * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
void dequantize_row_iq1_m (const block_iq1_m * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
void dequantize_row_iq4_nl (const block_iq4_nl * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
void dequantize_row_iq4_xs (const block_iq4_xs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
void dequantize_row_iq3_s (const block_iq3_s * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
|
|
||||||
|
|
||||||
// Dot product
|
|
||||||
void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
|
|
||||||
void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
|
|
||||||
void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
|
|
||||||
void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
|
|
||||||
void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
|
|
||||||
|
|
||||||
void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
|
|
||||||
void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
|
|
||||||
void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
|
|
||||||
void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
|
|
||||||
void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
|
|
||||||
|
|
||||||
void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
|
|
||||||
void ggml_vec_dot_iq2_xs_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
|
|
||||||
void ggml_vec_dot_iq2_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
|
|
||||||
void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
|
|
||||||
void ggml_vec_dot_iq1_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
|
|
||||||
void ggml_vec_dot_iq1_m_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
|
|
||||||
void ggml_vec_dot_iq4_nl_q8_0 (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
|
|
||||||
void ggml_vec_dot_iq4_xs_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
|
|
||||||
void ggml_vec_dot_iq3_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
|
|
||||||
|
|
||||||
// Quantization utilizing an importance matrix (a.k.a. "Activation aWare Quantization")
|
|
||||||
size_t quantize_iq2_xxs(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
|
||||||
size_t quantize_iq2_xs (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
|
||||||
size_t quantize_iq2_s (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
|
||||||
size_t quantize_iq3_xxs(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
|
||||||
size_t quantize_iq1_s (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
|
||||||
size_t quantize_iq1_m (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
|
||||||
size_t quantize_iq4_nl (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
|
||||||
size_t quantize_iq4_xs (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
|
||||||
size_t quantize_iq3_s (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
|
||||||
|
|
||||||
size_t quantize_q2_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
|
||||||
size_t quantize_q3_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
|
||||||
size_t quantize_q4_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
|
||||||
size_t quantize_q5_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
|
||||||
size_t quantize_q6_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
|
||||||
size_t quantize_q4_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
|
||||||
size_t quantize_q4_1(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
|
||||||
size_t quantize_q5_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
|
||||||
size_t quantize_q5_1(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
|
||||||
size_t quantize_q8_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
|
||||||
|
|
||||||
void iq2xs_init_impl(enum ggml_type type);
|
|
||||||
void iq2xs_free_impl(enum ggml_type type);
|
|
||||||
void iq3xs_init_impl(int grid_size);
|
|
||||||
void iq3xs_free_impl(int grid_size);
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
@ -1,49 +0,0 @@
|
|||||||
//
|
|
||||||
// MIT license
|
|
||||||
// Copyright (C) 2024 Intel Corporation
|
|
||||||
// SPDX-License-Identifier: MIT
|
|
||||||
//
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include "ggml.h"
|
|
||||||
#include "ggml-backend.h"
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
extern "C" {
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#define GGML_SYCL_MAX_DEVICES 48
|
|
||||||
#define GGML_SYCL_NAME "SYCL"
|
|
||||||
|
|
||||||
// backend API
|
|
||||||
GGML_API ggml_backend_t ggml_backend_sycl_init(int device);
|
|
||||||
|
|
||||||
// devide buffer
|
|
||||||
GGML_API ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(int device);
|
|
||||||
|
|
||||||
// split tensor buffer that splits matrices by rows across multiple devices
|
|
||||||
GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_sycl_split_buffer_type(const float * tensor_split);
|
|
||||||
|
|
||||||
// pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
|
|
||||||
GGML_API ggml_backend_buffer_type_t ggml_backend_sycl_host_buffer_type(void);
|
|
||||||
|
|
||||||
GGML_API void ggml_backend_sycl_print_sycl_devices(void);
|
|
||||||
GGML_API GGML_CALL void ggml_sycl_get_gpu_list(int *id_list, int max_len);
|
|
||||||
GGML_API GGML_CALL void ggml_sycl_get_device_description(int device, char *description, size_t description_size);
|
|
||||||
GGML_API GGML_CALL int ggml_backend_sycl_get_device_count();
|
|
||||||
GGML_API GGML_CALL void ggml_backend_sycl_get_device_memory(int device, size_t *free, size_t *total);
|
|
||||||
GGML_API GGML_CALL int ggml_backend_sycl_get_device_index(int device_id);
|
|
||||||
|
|
||||||
// TODO: these are temporary
|
|
||||||
// ref: https://github.com/ggerganov/llama.cpp/pull/6022#issuecomment-1992615670
|
|
||||||
GGML_API GGML_CALL int ggml_backend_sycl_get_device_id(int device_index);
|
|
||||||
GGML_API GGML_CALL void ggml_backend_sycl_set_single_device_mode(int main_gpu_id);
|
|
||||||
GGML_API GGML_CALL void ggml_backend_sycl_set_mul_device_mode();
|
|
||||||
|
|
||||||
// SYCL doesn't support registering host memory, keep here for reference
|
|
||||||
// GGML_API GGML_CALL bool ggml_backend_sycl_register_host_buffer(void * buffer, size_t size);
|
|
||||||
// GGML_API GGML_CALL void ggml_backend_sycl_unregister_host_buffer(void * buffer);
|
|
||||||
#ifdef __cplusplus
|
|
||||||
}
|
|
||||||
#endif
|
|
@ -1,29 +0,0 @@
|
|||||||
#pragma once
|
|
||||||
|
|
||||||
#include "ggml.h"
|
|
||||||
#include "ggml-backend.h"
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
extern "C" {
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#define GGML_VK_NAME "Vulkan"
|
|
||||||
#define GGML_VK_MAX_DEVICES 16
|
|
||||||
|
|
||||||
GGML_API void ggml_vk_instance_init(void);
|
|
||||||
|
|
||||||
// backend API
|
|
||||||
GGML_API GGML_CALL ggml_backend_t ggml_backend_vk_init(size_t dev_num);
|
|
||||||
|
|
||||||
GGML_API GGML_CALL bool ggml_backend_is_vk(ggml_backend_t backend);
|
|
||||||
GGML_API GGML_CALL int ggml_backend_vk_get_device_count(void);
|
|
||||||
GGML_API GGML_CALL void ggml_backend_vk_get_device_description(int device, char * description, size_t description_size);
|
|
||||||
GGML_API GGML_CALL void ggml_backend_vk_get_device_memory(int device, size_t * free, size_t * total);
|
|
||||||
|
|
||||||
GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num);
|
|
||||||
// pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
|
|
||||||
GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_vk_host_buffer_type(void);
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
|
||||||
}
|
|
||||||
#endif
|
|
17
bindings/ruby/ext/metal-embed.mk
Normal file
17
bindings/ruby/ext/metal-embed.mk
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
ggml/src/ggml-metal/ggml-metal-embed.o: \
|
||||||
|
ggml/src/ggml-metal/ggml-metal.metal \
|
||||||
|
ggml/src/ggml-metal/ggml-metal-impl.h \
|
||||||
|
ggml/src/ggml-common.h
|
||||||
|
@echo "Embedding Metal library"
|
||||||
|
@sed -e '/__embed_ggml-common.h__/r ggml/src/ggml-common.h' -e '/__embed_ggml-common.h__/d' < ggml/src/ggml-metal/ggml-metal.metal > ggml/src/ggml-metal/ggml-metal-embed.metal.tmp
|
||||||
|
@sed -e '/#include "ggml-metal-impl.h"/r ggml/src/ggml-metal/ggml-metal-impl.h' -e '/#include "ggml-metal-impl.h"/d' < ggml/src/ggml-metal/ggml-metal-embed.metal.tmp > ggml/src/ggml-metal/ggml-metal-embed.metal
|
||||||
|
$(eval TEMP_ASSEMBLY=$(shell mktemp -d))
|
||||||
|
@echo ".section __DATA, __ggml_metallib" > $(TEMP_ASSEMBLY)/ggml-metal-embed.s
|
||||||
|
@echo ".globl _ggml_metallib_start" >> $(TEMP_ASSEMBLY)/ggml-metal-embed.s
|
||||||
|
@echo "_ggml_metallib_start:" >> $(TEMP_ASSEMBLY)/ggml-metal-embed.s
|
||||||
|
@echo ".incbin \"ggml/src/ggml-metal/ggml-metal-embed.metal\"" >> $(TEMP_ASSEMBLY)/ggml-metal-embed.s
|
||||||
|
@echo ".globl _ggml_metallib_end" >> $(TEMP_ASSEMBLY)/ggml-metal-embed.s
|
||||||
|
@echo "_ggml_metallib_end:" >> $(TEMP_ASSEMBLY)/ggml-metal-embed.s
|
||||||
|
$(CC) $(CFLAGS) -c $(TEMP_ASSEMBLY)/ggml-metal-embed.s -o $@
|
||||||
|
@rm -f ${TEMP_ASSEMBLY}/ggml-metal-embed.s
|
||||||
|
@rmdir ${TEMP_ASSEMBLY}
|
6
bindings/ruby/ext/metal.mk
Normal file
6
bindings/ruby/ext/metal.mk
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
ggml/src/ggml-metal/ggml-metal.o: \
|
||||||
|
ggml/src/ggml-metal/ggml-metal.m \
|
||||||
|
ggml/src/ggml-metal/ggml-metal-impl.h \
|
||||||
|
ggml/include/ggml-metal.h \
|
||||||
|
ggml/include/ggml.h
|
||||||
|
$(CC) $(CFLAGS) -c $< -o $@
|
File diff suppressed because it is too large
Load Diff
@ -1,8 +1,15 @@
|
|||||||
#ifndef __RUBY_WHISPER_H
|
#ifndef RUBY_WHISPER_H
|
||||||
#define __RUBY_WHISPER_H
|
#define RUBY_WHISPER_H
|
||||||
|
|
||||||
#include "whisper.h"
|
#include "whisper.h"
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
VALUE *context;
|
||||||
|
VALUE user_data;
|
||||||
|
VALUE callback;
|
||||||
|
VALUE callbacks;
|
||||||
|
} ruby_whisper_callback_container;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
struct whisper_context *context;
|
struct whisper_context *context;
|
||||||
} ruby_whisper;
|
} ruby_whisper;
|
||||||
@ -10,6 +17,9 @@ typedef struct {
|
|||||||
typedef struct {
|
typedef struct {
|
||||||
struct whisper_full_params params;
|
struct whisper_full_params params;
|
||||||
bool diarize;
|
bool diarize;
|
||||||
|
ruby_whisper_callback_container *new_segment_callback_container;
|
||||||
|
ruby_whisper_callback_container *progress_callback_container;
|
||||||
|
ruby_whisper_callback_container *abort_callback_container;
|
||||||
} ruby_whisper_params;
|
} ruby_whisper_params;
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
6
bindings/ruby/extsources.rb
Normal file
6
bindings/ruby/extsources.rb
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
require "yaml"
|
||||||
|
|
||||||
|
sources = `git ls-files -z ../..`.split("\x0")
|
||||||
|
paths = YAML.load_file("../../.github/workflows/bindings-ruby.yml")[true]["push"]["paths"]
|
||||||
|
paths.delete "bindings/ruby/**"
|
||||||
|
EXTSOURCES = (Dir.glob(paths, base: "../..").collect {|path| "../../#{path}"} << "../../LICENSE") & sources
|
163
bindings/ruby/lib/whisper/model/uri.rb
Normal file
163
bindings/ruby/lib/whisper/model/uri.rb
Normal file
@ -0,0 +1,163 @@
|
|||||||
|
require "uri"
|
||||||
|
require "net/http"
|
||||||
|
require "time"
|
||||||
|
require "pathname"
|
||||||
|
require "io/console/size"
|
||||||
|
|
||||||
|
module Whisper
|
||||||
|
class Model
|
||||||
|
class URI
|
||||||
|
def initialize(uri)
|
||||||
|
@uri = URI(uri)
|
||||||
|
end
|
||||||
|
|
||||||
|
def to_path
|
||||||
|
cache
|
||||||
|
cache_path.to_path
|
||||||
|
end
|
||||||
|
|
||||||
|
def clear_cache
|
||||||
|
path = cache_path
|
||||||
|
path.delete if path.exist?
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
def cache_path
|
||||||
|
base_cache_dir/@uri.host/@uri.path[1..]
|
||||||
|
end
|
||||||
|
|
||||||
|
def base_cache_dir
|
||||||
|
base = case RUBY_PLATFORM
|
||||||
|
when /mswin|mingw/
|
||||||
|
ENV.key?("LOCALAPPDATA") ? Pathname(ENV["LOCALAPPDATA"]) : Pathname(Dir.home)/"AppData/Local"
|
||||||
|
when /darwin/
|
||||||
|
Pathname(Dir.home)/"Library/Caches"
|
||||||
|
else
|
||||||
|
ENV.key?("XDG_CACHE_HOME") ? ENV["XDG_CACHE_HOME"] : Pathname(Dir.home)/".cache"
|
||||||
|
end
|
||||||
|
base/"whisper.cpp"
|
||||||
|
end
|
||||||
|
|
||||||
|
def cache
|
||||||
|
path = cache_path
|
||||||
|
headers = {}
|
||||||
|
headers["if-modified-since"] = path.mtime.httpdate if path.exist?
|
||||||
|
request @uri, headers
|
||||||
|
path
|
||||||
|
end
|
||||||
|
|
||||||
|
def request(uri, headers)
|
||||||
|
Net::HTTP.start uri.host, uri.port, use_ssl: uri.scheme == "https" do |http|
|
||||||
|
request = Net::HTTP::Get.new(uri, headers)
|
||||||
|
http.request request do |response|
|
||||||
|
case response
|
||||||
|
when Net::HTTPNotModified
|
||||||
|
# noop
|
||||||
|
when Net::HTTPOK
|
||||||
|
download response
|
||||||
|
when Net::HTTPRedirection
|
||||||
|
request URI(response["location"]), headers
|
||||||
|
else
|
||||||
|
return if headers.key?("if-modified-since") # Use cache file
|
||||||
|
|
||||||
|
raise "#{response.code} #{response.message}\n#{response.body}"
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def download(response)
|
||||||
|
path = cache_path
|
||||||
|
path.dirname.mkpath unless path.dirname.exist?
|
||||||
|
downloading_path = Pathname("#{path}.downloading")
|
||||||
|
size = response.content_length
|
||||||
|
downloading_path.open "wb" do |file|
|
||||||
|
downloaded = 0
|
||||||
|
response.read_body do |chunk|
|
||||||
|
file << chunk
|
||||||
|
downloaded += chunk.bytesize
|
||||||
|
show_progress downloaded, size
|
||||||
|
end
|
||||||
|
$stderr.puts
|
||||||
|
end
|
||||||
|
downloading_path.rename path
|
||||||
|
end
|
||||||
|
|
||||||
|
def show_progress(current, size)
|
||||||
|
progress_rate_available = size && $stderr.tty?
|
||||||
|
|
||||||
|
unless @prev
|
||||||
|
@prev = Time.now
|
||||||
|
$stderr.puts "Downloading #{@uri} to #{cache_path}"
|
||||||
|
end
|
||||||
|
|
||||||
|
now = Time.now
|
||||||
|
|
||||||
|
if progress_rate_available
|
||||||
|
return if now - @prev < 1 && current < size
|
||||||
|
|
||||||
|
progress_width = 20
|
||||||
|
progress = current.to_f / size
|
||||||
|
arrow_length = progress * progress_width
|
||||||
|
arrow = "=" * (arrow_length - 1) + ">" + " " * (progress_width - arrow_length)
|
||||||
|
line = "[#{arrow}] (#{format_bytesize(current)} / #{format_bytesize(size)})"
|
||||||
|
padding = ' ' * ($stderr.winsize[1] - line.size)
|
||||||
|
$stderr.print "\r#{line}#{padding}"
|
||||||
|
else
|
||||||
|
return if now - @prev < 1
|
||||||
|
|
||||||
|
$stderr.print "."
|
||||||
|
end
|
||||||
|
@prev = now
|
||||||
|
end
|
||||||
|
|
||||||
|
def format_bytesize(bytesize)
|
||||||
|
return "0.0 B" if bytesize.zero?
|
||||||
|
|
||||||
|
units = %w[B KiB MiB GiB TiB]
|
||||||
|
exp = (Math.log(bytesize) / Math.log(1024)).to_i
|
||||||
|
format("%.1f %s", bytesize.to_f / 1024 ** exp, units[exp])
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
@pre_converted_models = %w[
|
||||||
|
tiny
|
||||||
|
tiny.en
|
||||||
|
tiny-q5_1
|
||||||
|
tiny.en-q5_1
|
||||||
|
tiny-q8_0
|
||||||
|
base
|
||||||
|
base.en
|
||||||
|
base-q5_1
|
||||||
|
base.en-q5_1
|
||||||
|
base-q8_0
|
||||||
|
small
|
||||||
|
small.en
|
||||||
|
small.en-tdrz
|
||||||
|
small-q5_1
|
||||||
|
small.en-q5_1
|
||||||
|
small-q8_0
|
||||||
|
medium
|
||||||
|
medium.en
|
||||||
|
medium-q5_0
|
||||||
|
medium.en-q5_0
|
||||||
|
medium-q8_0
|
||||||
|
large-v1
|
||||||
|
large-v2
|
||||||
|
large-v2-q5_0
|
||||||
|
large-v2-q8_0
|
||||||
|
large-v3
|
||||||
|
large-v3-q5_0
|
||||||
|
large-v3-turbo
|
||||||
|
large-v3-turbo-q5_0
|
||||||
|
large-v3-turbo-q8_0
|
||||||
|
].each_with_object({}) {|name, models|
|
||||||
|
models[name] = URI.new("https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-#{name}.bin")
|
||||||
|
}
|
||||||
|
|
||||||
|
class << self
|
||||||
|
attr_reader :pre_converted_models
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
153
bindings/ruby/sig/whisper.rbs
Normal file
153
bindings/ruby/sig/whisper.rbs
Normal file
@ -0,0 +1,153 @@
|
|||||||
|
module Whisper
|
||||||
|
interface _Samples
|
||||||
|
def length: () -> Integer
|
||||||
|
def each: { (Float) -> void } -> void
|
||||||
|
end
|
||||||
|
|
||||||
|
type log_callback = ^(Integer level, String message, Object user_data) -> void
|
||||||
|
type new_segment_callback = ^(Whisper::Context, void, Integer n_new, Object user_data) -> void
|
||||||
|
type progress_callback = ^(Whisper::Context, void, Integer progress, Object user_data) -> void
|
||||||
|
type abort_callback = ^(Whisper::Context, void, Object user_data) -> boolish
|
||||||
|
|
||||||
|
LOG_LEVEL_NONE: Integer
|
||||||
|
LOG_LEVEL_INFO: Integer
|
||||||
|
LOG_LEVEL_WARN: Integer
|
||||||
|
LOG_LEVEL_ERROR: Integer
|
||||||
|
LOG_LEVEL_DEBUG: Integer
|
||||||
|
LOG_LEVEL_CONT: Integer
|
||||||
|
|
||||||
|
def self.lang_max_id: () -> Integer
|
||||||
|
def self.lang_id: (string name) -> Integer
|
||||||
|
def self.lang_str: (Integer id) -> String
|
||||||
|
def self.lang_str_full: (Integer id) -> String
|
||||||
|
def self.log_set=: (log_callback) -> log_callback
|
||||||
|
def self.finalize_log_callback: (void) -> void # Second argument of ObjectSpace.define_finalizer
|
||||||
|
|
||||||
|
class Context
|
||||||
|
def initialize: (string | _ToPath | ::URI::HTTP ) -> void
|
||||||
|
def transcribe: (string, Params) -> void
|
||||||
|
| (string, Params) { (String) -> void } -> void
|
||||||
|
def model_n_vocab: () -> Integer
|
||||||
|
def model_n_audio_ctx: () -> Integer
|
||||||
|
def model_n_audio_state: () -> Integer
|
||||||
|
def model_n_text_head: () -> Integer
|
||||||
|
def model_n_text_layer: () -> Integer
|
||||||
|
def model_n_mels: () -> Integer
|
||||||
|
def model_ftype: () -> Integer
|
||||||
|
def model_type: () -> String
|
||||||
|
def full_n_segments: () -> Integer
|
||||||
|
def full_lang_id: () -> Integer
|
||||||
|
def full_get_segment_t0: (Integer) -> Integer
|
||||||
|
def full_get_segment_t1: (Integer) -> Integer
|
||||||
|
def full_get_segment_speaker_turn_next: (Integer) -> (true | false)
|
||||||
|
def full_get_segment_text: (Integer) -> String
|
||||||
|
def full_get_segment_no_speech_prob: (Integer) -> Float
|
||||||
|
def full: (Params, Array[Float], ?Integer) -> void
|
||||||
|
| (Params, _Samples, ?Integer) -> void
|
||||||
|
def full_parallel: (Params, Array[Float], ?Integer) -> void
|
||||||
|
| (Params, _Samples, ?Integer) -> void
|
||||||
|
| (Params, _Samples, ?Integer?, Integer) -> void
|
||||||
|
def each_segment: { (Segment) -> void } -> void
|
||||||
|
| () -> Enumerator[Segment]
|
||||||
|
def model: () -> Model
|
||||||
|
end
|
||||||
|
|
||||||
|
class Params
|
||||||
|
def initialize: () -> void
|
||||||
|
def language=: (String) -> String # TODO: Enumerate lang names
|
||||||
|
def language: () -> String
|
||||||
|
def translate=: (boolish) -> boolish
|
||||||
|
def translate: () -> (true | false)
|
||||||
|
def no_context=: (boolish) -> boolish
|
||||||
|
def no_context: () -> (true | false)
|
||||||
|
def single_segment=: (boolish) -> boolish
|
||||||
|
def single_segment: () -> (true | false)
|
||||||
|
def print_special=: (boolish) -> boolish
|
||||||
|
def print_special: () -> (true | false)
|
||||||
|
def print_progress=: (boolish) -> boolish
|
||||||
|
def print_progress: () -> (true | false)
|
||||||
|
def print_realtime=: (boolish) -> boolish
|
||||||
|
def print_realtime: () -> (true | false)
|
||||||
|
def print_timestamps=: (boolish) -> boolish
|
||||||
|
def print_timestamps: () -> (true | false)
|
||||||
|
def suppress_blank=: (boolish) -> boolish
|
||||||
|
def suppress_blank: () -> (true | false)
|
||||||
|
def suppress_nst=: (boolish) -> boolish
|
||||||
|
def suppress_nst: () -> (true | false)
|
||||||
|
def token_timestamps=: (boolish) -> boolish
|
||||||
|
def token_timestamps: () -> (true | false)
|
||||||
|
def split_on_word=: (boolish) -> boolish
|
||||||
|
def split_on_word: () -> (true | false)
|
||||||
|
def initial_prompt=: (_ToS) -> _ToS
|
||||||
|
def initial_prompt: () -> String
|
||||||
|
def diarize=: (boolish) -> boolish
|
||||||
|
def diarize: () -> (true | false)
|
||||||
|
def offset=: (Integer) -> Integer
|
||||||
|
def offset: () -> Integer
|
||||||
|
def duration=: (Integer) -> Integer
|
||||||
|
def duration: () -> Integer
|
||||||
|
def max_text_tokens=: (Integer) -> Integer
|
||||||
|
def max_text_tokens: () -> Integer
|
||||||
|
def temperature=: (Float) -> Float
|
||||||
|
def temperature: () -> Float
|
||||||
|
def max_initial_ts=: (Float) -> Float
|
||||||
|
def max_initial_ts: () -> Float
|
||||||
|
def length_penalty=: (Float) -> Float
|
||||||
|
def length_penalty: () -> Float
|
||||||
|
def temperature_inc=: (Float) -> Float
|
||||||
|
def temperature_inc: () -> Float
|
||||||
|
def entropy_thold=: (Float) -> Float
|
||||||
|
def entropy_thold: () -> Float
|
||||||
|
def logprob_thold=: (Float) -> Float
|
||||||
|
def logprob_thold: () -> Float
|
||||||
|
def no_speech_thold=: (Float) -> Float
|
||||||
|
def no_speech_thold: () -> Float
|
||||||
|
def new_segment_callback=: (new_segment_callback) -> new_segment_callback
|
||||||
|
def new_segment_callback_user_data=: (Object) -> Object
|
||||||
|
def progress_callback=: (progress_callback) -> progress_callback
|
||||||
|
def progress_callback_user_data=: (Object) -> Object
|
||||||
|
def abort_callback=: (abort_callback) -> abort_callback
|
||||||
|
def abort_callback_user_data=: (Object) -> Object
|
||||||
|
def on_new_segment: { (Segment) -> void } -> void
|
||||||
|
def on_progress: { (Integer) -> void } -> void
|
||||||
|
def abort_on: { (Object) -> boolish } -> void
|
||||||
|
end
|
||||||
|
|
||||||
|
class Model
|
||||||
|
def self.pre_converted_models: () -> Hash[String, Model::URI]
|
||||||
|
def initialize: () -> void
|
||||||
|
def n_vocab: () -> Integer
|
||||||
|
def n_audio_ctx: () -> Integer
|
||||||
|
def n_audio_state: () -> Integer
|
||||||
|
def n_audio_head: () -> Integer
|
||||||
|
def n_audio_layer: () -> Integer
|
||||||
|
def n_text_ctx: () -> Integer
|
||||||
|
def n_text_state: () -> Integer
|
||||||
|
def n_text_head: () -> Integer
|
||||||
|
def n_text_layer: () -> Integer
|
||||||
|
def n_mels: () -> Integer
|
||||||
|
def ftype: () -> Integer
|
||||||
|
def type: () -> String
|
||||||
|
|
||||||
|
class URI
|
||||||
|
def initialize: (string | ::URI::HTTP) -> void
|
||||||
|
def to_path: -> String
|
||||||
|
def clear_cache: -> void
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
class Segment
|
||||||
|
def initialize: () -> void
|
||||||
|
def start_time: () -> Integer
|
||||||
|
def end_time: () -> Integer
|
||||||
|
def speaker_next_turn?: () -> (true | false)
|
||||||
|
def text: () -> String
|
||||||
|
def no_speech_prob: () -> Float
|
||||||
|
end
|
||||||
|
|
||||||
|
class Error < StandardError
|
||||||
|
attr_reader code: Integer
|
||||||
|
|
||||||
|
def initialize: (Integer) -> void
|
||||||
|
end
|
||||||
|
end
|
24
bindings/ruby/tests/helper.rb
Normal file
24
bindings/ruby/tests/helper.rb
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
require "test/unit"
|
||||||
|
require "whisper"
|
||||||
|
require_relative "jfk_reader/jfk_reader"
|
||||||
|
|
||||||
|
class TestBase < Test::Unit::TestCase
|
||||||
|
AUDIO = File.join(__dir__, "..", "..", "..", "samples", "jfk.wav")
|
||||||
|
|
||||||
|
class << self
|
||||||
|
attr_reader :whisper
|
||||||
|
|
||||||
|
def startup
|
||||||
|
@whisper = Whisper::Context.new("base.en")
|
||||||
|
params = Whisper::Params.new
|
||||||
|
params.print_timestamps = false
|
||||||
|
@whisper.transcribe(TestBase::AUDIO, params)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
def whisper
|
||||||
|
self.class.whisper
|
||||||
|
end
|
||||||
|
end
|
5
bindings/ruby/tests/jfk_reader/.gitignore
vendored
Normal file
5
bindings/ruby/tests/jfk_reader/.gitignore
vendored
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
Makefile
|
||||||
|
jfk_reader.o
|
||||||
|
jfk_reader.so
|
||||||
|
jfk_reader.bundle
|
||||||
|
jfk_reader.dll
|
3
bindings/ruby/tests/jfk_reader/extconf.rb
Normal file
3
bindings/ruby/tests/jfk_reader/extconf.rb
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
require "mkmf"
|
||||||
|
|
||||||
|
create_makefile("jfk_reader")
|
68
bindings/ruby/tests/jfk_reader/jfk_reader.c
Normal file
68
bindings/ruby/tests/jfk_reader/jfk_reader.c
Normal file
@ -0,0 +1,68 @@
|
|||||||
|
#include <ruby.h>
|
||||||
|
#include <ruby/memory_view.h>
|
||||||
|
#include <ruby/encoding.h>
|
||||||
|
|
||||||
|
static VALUE
|
||||||
|
jfk_reader_initialize(VALUE self, VALUE audio_path)
|
||||||
|
{
|
||||||
|
rb_iv_set(self, "audio_path", audio_path);
|
||||||
|
return Qnil;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool
|
||||||
|
jfk_reader_get_memory_view(const VALUE obj, rb_memory_view_t *view, int flags)
|
||||||
|
{
|
||||||
|
VALUE audio_path = rb_iv_get(obj, "audio_path");
|
||||||
|
const char *audio_path_str = StringValueCStr(audio_path);
|
||||||
|
const int n_samples = 176000;
|
||||||
|
float *data = (float *)malloc(n_samples * sizeof(float));
|
||||||
|
short *samples = (short *)malloc(n_samples * sizeof(short));
|
||||||
|
FILE *file = fopen(audio_path_str, "rb");
|
||||||
|
|
||||||
|
fseek(file, 78, SEEK_SET);
|
||||||
|
fread(samples, sizeof(short), n_samples, file);
|
||||||
|
fclose(file);
|
||||||
|
for (int i = 0; i < n_samples; i++) {
|
||||||
|
data[i] = samples[i]/32768.0;
|
||||||
|
}
|
||||||
|
|
||||||
|
view->obj = obj;
|
||||||
|
view->data = (void *)data;
|
||||||
|
view->byte_size = sizeof(float) * n_samples;
|
||||||
|
view->readonly = true;
|
||||||
|
view->format = "f";
|
||||||
|
view->item_size = sizeof(float);
|
||||||
|
view->item_desc.components = NULL;
|
||||||
|
view->item_desc.length = 0;
|
||||||
|
view->ndim = 1;
|
||||||
|
view->shape = NULL;
|
||||||
|
view->sub_offsets = NULL;
|
||||||
|
view->private_data = NULL;
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool
|
||||||
|
jfk_reader_release_memory_view(const VALUE obj, rb_memory_view_t *view)
|
||||||
|
{
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool
|
||||||
|
jfk_reader_memory_view_available_p(const VALUE obj)
|
||||||
|
{
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static const rb_memory_view_entry_t jfk_reader_view_entry = {
|
||||||
|
jfk_reader_get_memory_view,
|
||||||
|
jfk_reader_release_memory_view,
|
||||||
|
jfk_reader_memory_view_available_p
|
||||||
|
};
|
||||||
|
|
||||||
|
void Init_jfk_reader(void)
|
||||||
|
{
|
||||||
|
VALUE cJFKReader = rb_define_class("JFKReader", rb_cObject);
|
||||||
|
rb_memory_view_register(cJFKReader, &jfk_reader_view_entry);
|
||||||
|
rb_define_method(cJFKReader, "initialize", jfk_reader_initialize, 1);
|
||||||
|
}
|
160
bindings/ruby/tests/test_callback.rb
Normal file
160
bindings/ruby/tests/test_callback.rb
Normal file
@ -0,0 +1,160 @@
|
|||||||
|
require_relative "helper"
|
||||||
|
|
||||||
|
class TestCallback < TestBase
|
||||||
|
def setup
|
||||||
|
GC.start
|
||||||
|
@params = Whisper::Params.new
|
||||||
|
@whisper = Whisper::Context.new("base.en")
|
||||||
|
@audio = File.join(AUDIO)
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_new_segment_callback
|
||||||
|
@params.new_segment_callback = ->(context, state, n_new, user_data) {
|
||||||
|
assert_kind_of Integer, n_new
|
||||||
|
assert n_new > 0
|
||||||
|
assert_same @whisper, context
|
||||||
|
|
||||||
|
n_segments = context.full_n_segments
|
||||||
|
n_new.times do |i|
|
||||||
|
i_segment = n_segments - 1 + i
|
||||||
|
start_time = context.full_get_segment_t0(i_segment) * 10
|
||||||
|
end_time = context.full_get_segment_t1(i_segment) * 10
|
||||||
|
text = context.full_get_segment_text(i_segment)
|
||||||
|
|
||||||
|
assert_kind_of Integer, start_time
|
||||||
|
assert start_time >= 0
|
||||||
|
assert_kind_of Integer, end_time
|
||||||
|
assert end_time > 0
|
||||||
|
assert_match /ask not what your country can do for you, ask what you can do for your country/, text if i_segment == 0
|
||||||
|
end
|
||||||
|
}
|
||||||
|
|
||||||
|
@whisper.transcribe(@audio, @params)
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_new_segment_callback_closure
|
||||||
|
search_word = "what"
|
||||||
|
@params.new_segment_callback = ->(context, state, n_new, user_data) {
|
||||||
|
n_segments = context.full_n_segments
|
||||||
|
n_new.times do |i|
|
||||||
|
i_segment = n_segments - 1 + i
|
||||||
|
text = context.full_get_segment_text(i_segment)
|
||||||
|
if text.include?(search_word)
|
||||||
|
t0 = context.full_get_segment_t0(i_segment)
|
||||||
|
t1 = context.full_get_segment_t1(i_segment)
|
||||||
|
raise "search word '#{search_word}' found at between #{t0} and #{t1}"
|
||||||
|
end
|
||||||
|
end
|
||||||
|
}
|
||||||
|
|
||||||
|
assert_raise RuntimeError do
|
||||||
|
@whisper.transcribe(@audio, @params)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_new_segment_callback_user_data
|
||||||
|
udata = Object.new
|
||||||
|
@params.new_segment_callback_user_data = udata
|
||||||
|
@params.new_segment_callback = ->(context, state, n_new, user_data) {
|
||||||
|
assert_same udata, user_data
|
||||||
|
}
|
||||||
|
|
||||||
|
@whisper.transcribe(@audio, @params)
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_new_segment_callback_user_data_gc
|
||||||
|
@params.new_segment_callback_user_data = "My user data"
|
||||||
|
@params.new_segment_callback = ->(context, state, n_new, user_data) {
|
||||||
|
assert_equal "My user data", user_data
|
||||||
|
}
|
||||||
|
GC.start
|
||||||
|
|
||||||
|
assert_same @whisper, @whisper.transcribe(@audio, @params)
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_progress_callback
|
||||||
|
first = nil
|
||||||
|
last = nil
|
||||||
|
@params.progress_callback = ->(context, state, progress, user_data) {
|
||||||
|
assert_kind_of Integer, progress
|
||||||
|
assert 0 <= progress && progress <= 100
|
||||||
|
assert_same @whisper, context
|
||||||
|
first = progress if first.nil?
|
||||||
|
last = progress
|
||||||
|
}
|
||||||
|
@whisper.transcribe(@audio, @params)
|
||||||
|
assert_equal 0, first
|
||||||
|
assert_equal 100, last
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_progress_callback_user_data
|
||||||
|
udata = Object.new
|
||||||
|
@params.progress_callback_user_data = udata
|
||||||
|
@params.progress_callback = ->(context, state, n_new, user_data) {
|
||||||
|
assert_same udata, user_data
|
||||||
|
}
|
||||||
|
|
||||||
|
@whisper.transcribe(@audio, @params)
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_on_progress
|
||||||
|
first = nil
|
||||||
|
last = nil
|
||||||
|
@params.on_progress do |progress|
|
||||||
|
assert_kind_of Integer, progress
|
||||||
|
assert 0 <= progress && progress <= 100
|
||||||
|
first = progress if first.nil?
|
||||||
|
last = progress
|
||||||
|
end
|
||||||
|
@whisper.transcribe(@audio, @params)
|
||||||
|
assert_equal 0, first
|
||||||
|
assert_equal 100, last
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_abort_callback
|
||||||
|
i = 0
|
||||||
|
@params.abort_callback = ->(user_data) {
|
||||||
|
assert_nil user_data
|
||||||
|
i += 1
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
@whisper.transcribe(@audio, @params)
|
||||||
|
assert i > 0
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_abort_callback_abort
|
||||||
|
i = 0
|
||||||
|
@params.abort_callback = ->(user_data) {
|
||||||
|
i += 1
|
||||||
|
return i == 3
|
||||||
|
}
|
||||||
|
@whisper.transcribe(@audio, @params)
|
||||||
|
assert_equal 3, i
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_abort_callback_user_data
|
||||||
|
udata = Object.new
|
||||||
|
@params.abort_callback_user_data = udata
|
||||||
|
yielded = nil
|
||||||
|
@params.abort_callback = ->(user_data) {
|
||||||
|
yielded = user_data
|
||||||
|
}
|
||||||
|
@whisper.transcribe(@audio, @params)
|
||||||
|
assert_same udata, yielded
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_abort_on
|
||||||
|
do_abort = false
|
||||||
|
aborted_from_callback = false
|
||||||
|
@params.on_new_segment do |segment|
|
||||||
|
do_abort = true if segment.text.match? /ask/
|
||||||
|
end
|
||||||
|
i = 0
|
||||||
|
@params.abort_on do
|
||||||
|
i += 1
|
||||||
|
do_abort
|
||||||
|
end
|
||||||
|
@whisper.transcribe(@audio, @params)
|
||||||
|
assert i > 0
|
||||||
|
end
|
||||||
|
end
|
20
bindings/ruby/tests/test_error.rb
Normal file
20
bindings/ruby/tests/test_error.rb
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
require_relative "helper"
|
||||||
|
|
||||||
|
class TestError < TestBase
|
||||||
|
def test_error
|
||||||
|
error = Whisper::Error.new(-2)
|
||||||
|
assert_equal "failed to compute log mel spectrogram", error.message
|
||||||
|
assert_equal -2, error.code
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_unknown_error
|
||||||
|
error = Whisper::Error.new(-20)
|
||||||
|
assert_equal "unknown error", error.message
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_non_int_code
|
||||||
|
assert_raise TypeError do
|
||||||
|
error = Whisper::Error.new("non int")
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
109
bindings/ruby/tests/test_model.rb
Normal file
109
bindings/ruby/tests/test_model.rb
Normal file
@ -0,0 +1,109 @@
|
|||||||
|
require_relative "helper"
|
||||||
|
require "pathname"
|
||||||
|
|
||||||
|
class TestModel < TestBase
|
||||||
|
def test_model
|
||||||
|
whisper = Whisper::Context.new("base.en")
|
||||||
|
assert_instance_of Whisper::Model, whisper.model
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_attributes
|
||||||
|
whisper = Whisper::Context.new("base.en")
|
||||||
|
model = whisper.model
|
||||||
|
|
||||||
|
assert_equal 51864, model.n_vocab
|
||||||
|
assert_equal 1500, model.n_audio_ctx
|
||||||
|
assert_equal 512, model.n_audio_state
|
||||||
|
assert_equal 8, model.n_audio_head
|
||||||
|
assert_equal 6, model.n_audio_layer
|
||||||
|
assert_equal 448, model.n_text_ctx
|
||||||
|
assert_equal 512, model.n_text_state
|
||||||
|
assert_equal 8, model.n_text_head
|
||||||
|
assert_equal 6, model.n_text_layer
|
||||||
|
assert_equal 80, model.n_mels
|
||||||
|
assert_equal 1, model.ftype
|
||||||
|
assert_equal "base", model.type
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_gc
|
||||||
|
model = Whisper::Context.new("base.en").model
|
||||||
|
GC.start
|
||||||
|
|
||||||
|
assert_equal 51864, model.n_vocab
|
||||||
|
assert_equal 1500, model.n_audio_ctx
|
||||||
|
assert_equal 512, model.n_audio_state
|
||||||
|
assert_equal 8, model.n_audio_head
|
||||||
|
assert_equal 6, model.n_audio_layer
|
||||||
|
assert_equal 448, model.n_text_ctx
|
||||||
|
assert_equal 512, model.n_text_state
|
||||||
|
assert_equal 8, model.n_text_head
|
||||||
|
assert_equal 6, model.n_text_layer
|
||||||
|
assert_equal 80, model.n_mels
|
||||||
|
assert_equal 1, model.ftype
|
||||||
|
assert_equal "base", model.type
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_pathname
|
||||||
|
path = Pathname(Whisper::Model.pre_converted_models["base.en"].to_path)
|
||||||
|
whisper = Whisper::Context.new(path)
|
||||||
|
model = whisper.model
|
||||||
|
|
||||||
|
assert_equal 51864, model.n_vocab
|
||||||
|
assert_equal 1500, model.n_audio_ctx
|
||||||
|
assert_equal 512, model.n_audio_state
|
||||||
|
assert_equal 8, model.n_audio_head
|
||||||
|
assert_equal 6, model.n_audio_layer
|
||||||
|
assert_equal 448, model.n_text_ctx
|
||||||
|
assert_equal 512, model.n_text_state
|
||||||
|
assert_equal 8, model.n_text_head
|
||||||
|
assert_equal 6, model.n_text_layer
|
||||||
|
assert_equal 80, model.n_mels
|
||||||
|
assert_equal 1, model.ftype
|
||||||
|
assert_equal "base", model.type
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_auto_download
|
||||||
|
path = Whisper::Model.pre_converted_models["base.en"].to_path
|
||||||
|
|
||||||
|
assert_path_exist path
|
||||||
|
assert_equal 147964211, File.size(path)
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_uri_string
|
||||||
|
path = "https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin"
|
||||||
|
whisper = Whisper::Context.new(path)
|
||||||
|
model = whisper.model
|
||||||
|
|
||||||
|
assert_equal 51864, model.n_vocab
|
||||||
|
assert_equal 1500, model.n_audio_ctx
|
||||||
|
assert_equal 512, model.n_audio_state
|
||||||
|
assert_equal 8, model.n_audio_head
|
||||||
|
assert_equal 6, model.n_audio_layer
|
||||||
|
assert_equal 448, model.n_text_ctx
|
||||||
|
assert_equal 512, model.n_text_state
|
||||||
|
assert_equal 8, model.n_text_head
|
||||||
|
assert_equal 6, model.n_text_layer
|
||||||
|
assert_equal 80, model.n_mels
|
||||||
|
assert_equal 1, model.ftype
|
||||||
|
assert_equal "base", model.type
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_uri
|
||||||
|
path = URI("https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin")
|
||||||
|
whisper = Whisper::Context.new(path)
|
||||||
|
model = whisper.model
|
||||||
|
|
||||||
|
assert_equal 51864, model.n_vocab
|
||||||
|
assert_equal 1500, model.n_audio_ctx
|
||||||
|
assert_equal 512, model.n_audio_state
|
||||||
|
assert_equal 8, model.n_audio_head
|
||||||
|
assert_equal 6, model.n_audio_layer
|
||||||
|
assert_equal 448, model.n_text_ctx
|
||||||
|
assert_equal 512, model.n_text_state
|
||||||
|
assert_equal 8, model.n_text_head
|
||||||
|
assert_equal 6, model.n_text_layer
|
||||||
|
assert_equal 80, model.n_mels
|
||||||
|
assert_equal 1, model.ftype
|
||||||
|
assert_equal "base", model.type
|
||||||
|
end
|
||||||
|
end
|
31
bindings/ruby/tests/test_package.rb
Normal file
31
bindings/ruby/tests/test_package.rb
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
require_relative "helper"
|
||||||
|
require 'tempfile'
|
||||||
|
require 'tmpdir'
|
||||||
|
require 'shellwords'
|
||||||
|
|
||||||
|
class TestPackage < TestBase
|
||||||
|
def test_build
|
||||||
|
Tempfile.create do |file|
|
||||||
|
assert system("gem", "build", "whispercpp.gemspec", "--output", file.to_path.shellescape, exception: true)
|
||||||
|
assert file.size > 0
|
||||||
|
assert_path_exist file.to_path
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
sub_test_case "Building binary on installation" do
|
||||||
|
def setup
|
||||||
|
system "rake", "build", exception: true
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_install
|
||||||
|
match_data = `rake -Tbuild`.match(/(whispercpp-(.+)\.gem)/)
|
||||||
|
filename = match_data[1]
|
||||||
|
version = match_data[2]
|
||||||
|
basename = "whisper.#{RbConfig::CONFIG["DLEXT"]}"
|
||||||
|
Dir.mktmpdir do |dir|
|
||||||
|
system "gem", "install", "--install-dir", dir.shellescape, "--no-document", "pkg/#{filename.shellescape}", exception: true
|
||||||
|
assert_path_exist File.join(dir, "gems/whispercpp-#{version}/lib", basename)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
160
bindings/ruby/tests/test_params.rb
Normal file
160
bindings/ruby/tests/test_params.rb
Normal file
@ -0,0 +1,160 @@
|
|||||||
|
require_relative "helper"
|
||||||
|
|
||||||
|
class TestParams < TestBase
|
||||||
|
def setup
|
||||||
|
@params = Whisper::Params.new
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_language
|
||||||
|
@params.language = "en"
|
||||||
|
assert_equal @params.language, "en"
|
||||||
|
@params.language = "auto"
|
||||||
|
assert_equal @params.language, "auto"
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_offset
|
||||||
|
@params.offset = 10_000
|
||||||
|
assert_equal @params.offset, 10_000
|
||||||
|
@params.offset = 0
|
||||||
|
assert_equal @params.offset, 0
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_duration
|
||||||
|
@params.duration = 60_000
|
||||||
|
assert_equal @params.duration, 60_000
|
||||||
|
@params.duration = 0
|
||||||
|
assert_equal @params.duration, 0
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_max_text_tokens
|
||||||
|
@params.max_text_tokens = 300
|
||||||
|
assert_equal @params.max_text_tokens, 300
|
||||||
|
@params.max_text_tokens = 0
|
||||||
|
assert_equal @params.max_text_tokens, 0
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_translate
|
||||||
|
@params.translate = true
|
||||||
|
assert @params.translate
|
||||||
|
@params.translate = false
|
||||||
|
assert !@params.translate
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_no_context
|
||||||
|
@params.no_context = true
|
||||||
|
assert @params.no_context
|
||||||
|
@params.no_context = false
|
||||||
|
assert !@params.no_context
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_single_segment
|
||||||
|
@params.single_segment = true
|
||||||
|
assert @params.single_segment
|
||||||
|
@params.single_segment = false
|
||||||
|
assert !@params.single_segment
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_print_special
|
||||||
|
@params.print_special = true
|
||||||
|
assert @params.print_special
|
||||||
|
@params.print_special = false
|
||||||
|
assert !@params.print_special
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_print_progress
|
||||||
|
@params.print_progress = true
|
||||||
|
assert @params.print_progress
|
||||||
|
@params.print_progress = false
|
||||||
|
assert !@params.print_progress
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_print_realtime
|
||||||
|
@params.print_realtime = true
|
||||||
|
assert @params.print_realtime
|
||||||
|
@params.print_realtime = false
|
||||||
|
assert !@params.print_realtime
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_print_timestamps
|
||||||
|
@params.print_timestamps = true
|
||||||
|
assert @params.print_timestamps
|
||||||
|
@params.print_timestamps = false
|
||||||
|
assert !@params.print_timestamps
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_suppress_blank
|
||||||
|
@params.suppress_blank = true
|
||||||
|
assert @params.suppress_blank
|
||||||
|
@params.suppress_blank = false
|
||||||
|
assert !@params.suppress_blank
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_suppress_nst
|
||||||
|
@params.suppress_nst = true
|
||||||
|
assert @params.suppress_nst
|
||||||
|
@params.suppress_nst = false
|
||||||
|
assert !@params.suppress_nst
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_token_timestamps
|
||||||
|
@params.token_timestamps = true
|
||||||
|
assert @params.token_timestamps
|
||||||
|
@params.token_timestamps = false
|
||||||
|
assert !@params.token_timestamps
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_split_on_word
|
||||||
|
@params.split_on_word = true
|
||||||
|
assert @params.split_on_word
|
||||||
|
@params.split_on_word = false
|
||||||
|
assert !@params.split_on_word
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_initial_prompt
|
||||||
|
assert_nil @params.initial_prompt
|
||||||
|
@params.initial_prompt = "You are a polite person."
|
||||||
|
assert_equal "You are a polite person.", @params.initial_prompt
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_temperature
|
||||||
|
assert_equal 0.0, @params.temperature
|
||||||
|
@params.temperature = 0.5
|
||||||
|
assert_equal 0.5, @params.temperature
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_max_initial_ts
|
||||||
|
assert_equal 1.0, @params.max_initial_ts
|
||||||
|
@params.max_initial_ts = 600.0
|
||||||
|
assert_equal 600.0, @params.max_initial_ts
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_length_penalty
|
||||||
|
assert_equal -1.0, @params.length_penalty
|
||||||
|
@params.length_penalty = 0.5
|
||||||
|
assert_equal 0.5, @params.length_penalty
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_temperature_inc
|
||||||
|
assert_in_delta 0.2, @params.temperature_inc
|
||||||
|
@params.temperature_inc = 0.5
|
||||||
|
assert_in_delta 0.5, @params.temperature_inc
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_entropy_thold
|
||||||
|
assert_in_delta 2.4, @params.entropy_thold
|
||||||
|
@params.entropy_thold = 3.0
|
||||||
|
assert_in_delta 3.0, @params.entropy_thold
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_logprob_thold
|
||||||
|
assert_in_delta -1.0, @params.logprob_thold
|
||||||
|
@params.logprob_thold = -0.5
|
||||||
|
assert_in_delta -0.5, @params.logprob_thold
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_no_speech_thold
|
||||||
|
assert_in_delta 0.6, @params.no_speech_thold
|
||||||
|
@params.no_speech_thold = 0.2
|
||||||
|
assert_in_delta 0.2, @params.no_speech_thold
|
||||||
|
end
|
||||||
|
end
|
74
bindings/ruby/tests/test_segment.rb
Normal file
74
bindings/ruby/tests/test_segment.rb
Normal file
@ -0,0 +1,74 @@
|
|||||||
|
require_relative "helper"
|
||||||
|
|
||||||
|
class TestSegment < TestBase
|
||||||
|
def test_iteration
|
||||||
|
whisper.each_segment do |segment|
|
||||||
|
assert_instance_of Whisper::Segment, segment
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_enumerator
|
||||||
|
enum = whisper.each_segment
|
||||||
|
assert_instance_of Enumerator, enum
|
||||||
|
enum.to_a.each_with_index do |segment, index|
|
||||||
|
assert_instance_of Whisper::Segment, segment
|
||||||
|
assert_kind_of Integer, index
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_start_time
|
||||||
|
i = 0
|
||||||
|
whisper.each_segment do |segment|
|
||||||
|
assert_equal 0, segment.start_time if i == 0
|
||||||
|
i += 1
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_end_time
|
||||||
|
i = 0
|
||||||
|
whisper.each_segment do |segment|
|
||||||
|
assert_equal whisper.full_get_segment_t1(i) * 10, segment.end_time
|
||||||
|
i += 1
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_no_speech_prob
|
||||||
|
no_speech_prob = nil
|
||||||
|
whisper.each_segment do |segment|
|
||||||
|
no_speech_prob = segment.no_speech_prob
|
||||||
|
end
|
||||||
|
assert no_speech_prob > 0.0
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_on_new_segment
|
||||||
|
params = Whisper::Params.new
|
||||||
|
seg = nil
|
||||||
|
index = 0
|
||||||
|
params.on_new_segment do |segment|
|
||||||
|
assert_instance_of Whisper::Segment, segment
|
||||||
|
if index == 0
|
||||||
|
seg = segment
|
||||||
|
assert_equal 0, segment.start_time
|
||||||
|
assert_match /ask not what your country can do for you, ask what you can do for your country/, segment.text
|
||||||
|
end
|
||||||
|
index += 1
|
||||||
|
end
|
||||||
|
whisper.transcribe(AUDIO, params)
|
||||||
|
assert_equal 0, seg.start_time
|
||||||
|
assert_match /ask not what your country can do for you, ask what you can do for your country/, seg.text
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_on_new_segment_twice
|
||||||
|
params = Whisper::Params.new
|
||||||
|
seg = nil
|
||||||
|
params.on_new_segment do |segment|
|
||||||
|
seg = segment
|
||||||
|
return
|
||||||
|
end
|
||||||
|
params.on_new_segment do |segment|
|
||||||
|
assert_same seg, segment
|
||||||
|
return
|
||||||
|
end
|
||||||
|
whisper.transcribe(AUDIO, params)
|
||||||
|
end
|
||||||
|
end
|
@ -1,131 +1,217 @@
|
|||||||
TOPDIR = File.expand_path(File.join(File.dirname(__FILE__), '..'))
|
require_relative "helper"
|
||||||
EXTDIR = File.join(TOPDIR, 'ext')
|
require "stringio"
|
||||||
#$LIBDIR = File.join(TOPDIR, 'lib')
|
require "etc"
|
||||||
#$:.unshift(LIBDIR)
|
|
||||||
$:.unshift(EXTDIR)
|
|
||||||
|
|
||||||
require 'whisper'
|
# Exists to detect memory-related bug
|
||||||
require 'test/unit'
|
Whisper.log_set ->(level, buffer, user_data) {}, nil
|
||||||
|
|
||||||
class TestWhisper < Test::Unit::TestCase
|
class TestWhisper < TestBase
|
||||||
def setup
|
def setup
|
||||||
@params = Whisper::Params.new
|
@params = Whisper::Params.new
|
||||||
end
|
end
|
||||||
|
|
||||||
def test_language
|
|
||||||
@params.language = "en"
|
|
||||||
assert_equal @params.language, "en"
|
|
||||||
@params.language = "auto"
|
|
||||||
assert_equal @params.language, "auto"
|
|
||||||
end
|
|
||||||
|
|
||||||
def test_offset
|
|
||||||
@params.offset = 10_000
|
|
||||||
assert_equal @params.offset, 10_000
|
|
||||||
@params.offset = 0
|
|
||||||
assert_equal @params.offset, 0
|
|
||||||
end
|
|
||||||
|
|
||||||
def test_duration
|
|
||||||
@params.duration = 60_000
|
|
||||||
assert_equal @params.duration, 60_000
|
|
||||||
@params.duration = 0
|
|
||||||
assert_equal @params.duration, 0
|
|
||||||
end
|
|
||||||
|
|
||||||
def test_max_text_tokens
|
|
||||||
@params.max_text_tokens = 300
|
|
||||||
assert_equal @params.max_text_tokens, 300
|
|
||||||
@params.max_text_tokens = 0
|
|
||||||
assert_equal @params.max_text_tokens, 0
|
|
||||||
end
|
|
||||||
|
|
||||||
def test_translate
|
|
||||||
@params.translate = true
|
|
||||||
assert @params.translate
|
|
||||||
@params.translate = false
|
|
||||||
assert !@params.translate
|
|
||||||
end
|
|
||||||
|
|
||||||
def test_no_context
|
|
||||||
@params.no_context = true
|
|
||||||
assert @params.no_context
|
|
||||||
@params.no_context = false
|
|
||||||
assert !@params.no_context
|
|
||||||
end
|
|
||||||
|
|
||||||
def test_single_segment
|
|
||||||
@params.single_segment = true
|
|
||||||
assert @params.single_segment
|
|
||||||
@params.single_segment = false
|
|
||||||
assert !@params.single_segment
|
|
||||||
end
|
|
||||||
|
|
||||||
def test_print_special
|
|
||||||
@params.print_special = true
|
|
||||||
assert @params.print_special
|
|
||||||
@params.print_special = false
|
|
||||||
assert !@params.print_special
|
|
||||||
end
|
|
||||||
|
|
||||||
def test_print_progress
|
|
||||||
@params.print_progress = true
|
|
||||||
assert @params.print_progress
|
|
||||||
@params.print_progress = false
|
|
||||||
assert !@params.print_progress
|
|
||||||
end
|
|
||||||
|
|
||||||
def test_print_realtime
|
|
||||||
@params.print_realtime = true
|
|
||||||
assert @params.print_realtime
|
|
||||||
@params.print_realtime = false
|
|
||||||
assert !@params.print_realtime
|
|
||||||
end
|
|
||||||
|
|
||||||
def test_print_timestamps
|
|
||||||
@params.print_timestamps = true
|
|
||||||
assert @params.print_timestamps
|
|
||||||
@params.print_timestamps = false
|
|
||||||
assert !@params.print_timestamps
|
|
||||||
end
|
|
||||||
|
|
||||||
def test_suppress_blank
|
|
||||||
@params.suppress_blank = true
|
|
||||||
assert @params.suppress_blank
|
|
||||||
@params.suppress_blank = false
|
|
||||||
assert !@params.suppress_blank
|
|
||||||
end
|
|
||||||
|
|
||||||
def test_suppress_non_speech_tokens
|
|
||||||
@params.suppress_non_speech_tokens = true
|
|
||||||
assert @params.suppress_non_speech_tokens
|
|
||||||
@params.suppress_non_speech_tokens = false
|
|
||||||
assert !@params.suppress_non_speech_tokens
|
|
||||||
end
|
|
||||||
|
|
||||||
def test_token_timestamps
|
|
||||||
@params.token_timestamps = true
|
|
||||||
assert @params.token_timestamps
|
|
||||||
@params.token_timestamps = false
|
|
||||||
assert !@params.token_timestamps
|
|
||||||
end
|
|
||||||
|
|
||||||
def test_split_on_word
|
|
||||||
@params.split_on_word = true
|
|
||||||
assert @params.split_on_word
|
|
||||||
@params.split_on_word = false
|
|
||||||
assert !@params.split_on_word
|
|
||||||
end
|
|
||||||
|
|
||||||
def test_whisper
|
def test_whisper
|
||||||
@whisper = Whisper::Context.new(File.join(TOPDIR, '..', '..', 'models', 'ggml-base.en.bin'))
|
@whisper = Whisper::Context.new("base.en")
|
||||||
params = Whisper::Params.new
|
params = Whisper::Params.new
|
||||||
params.print_timestamps = false
|
params.print_timestamps = false
|
||||||
|
|
||||||
jfk = File.join(TOPDIR, '..', '..', 'samples', 'jfk.wav')
|
@whisper.transcribe(AUDIO, params) {|text|
|
||||||
@whisper.transcribe(jfk, params) {|text|
|
|
||||||
assert_match /ask not what your country can do for you, ask what you can do for your country/, text
|
assert_match /ask not what your country can do for you, ask what you can do for your country/, text
|
||||||
}
|
}
|
||||||
end
|
end
|
||||||
|
|
||||||
|
sub_test_case "After transcription" do
|
||||||
|
def test_full_n_segments
|
||||||
|
assert_equal 1, whisper.full_n_segments
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_full_lang_id
|
||||||
|
assert_equal 0, whisper.full_lang_id
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_full_get_segment_t0
|
||||||
|
assert_equal 0, whisper.full_get_segment_t0(0)
|
||||||
|
assert_raise IndexError do
|
||||||
|
whisper.full_get_segment_t0(whisper.full_n_segments)
|
||||||
|
end
|
||||||
|
assert_raise IndexError do
|
||||||
|
whisper.full_get_segment_t0(-1)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_full_get_segment_t1
|
||||||
|
t1 = whisper.full_get_segment_t1(0)
|
||||||
|
assert_kind_of Integer, t1
|
||||||
|
assert t1 > 0
|
||||||
|
assert_raise IndexError do
|
||||||
|
whisper.full_get_segment_t1(whisper.full_n_segments)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_full_get_segment_speaker_turn_next
|
||||||
|
assert_false whisper.full_get_segment_speaker_turn_next(0)
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_full_get_segment_text
|
||||||
|
assert_match /ask not what your country can do for you, ask what you can do for your country/, whisper.full_get_segment_text(0)
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_full_get_segment_no_speech_prob
|
||||||
|
prob = whisper.full_get_segment_no_speech_prob(0)
|
||||||
|
assert prob > 0.0
|
||||||
|
assert prob < 1.0
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_lang_max_id
|
||||||
|
assert_kind_of Integer, Whisper.lang_max_id
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_lang_id
|
||||||
|
assert_equal 0, Whisper.lang_id("en")
|
||||||
|
assert_raise ArgumentError do
|
||||||
|
Whisper.lang_id("non existing language")
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_lang_str
|
||||||
|
assert_equal "en", Whisper.lang_str(0)
|
||||||
|
assert_raise IndexError do
|
||||||
|
Whisper.lang_str(Whisper.lang_max_id + 1)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_lang_str_full
|
||||||
|
assert_equal "english", Whisper.lang_str_full(0)
|
||||||
|
assert_raise IndexError do
|
||||||
|
Whisper.lang_str_full(Whisper.lang_max_id + 1)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_log_set
|
||||||
|
user_data = Object.new
|
||||||
|
logs = []
|
||||||
|
log_callback = ->(level, buffer, udata) {
|
||||||
|
logs << [level, buffer, udata]
|
||||||
|
}
|
||||||
|
Whisper.log_set log_callback, user_data
|
||||||
|
Whisper::Context.new("base.en")
|
||||||
|
|
||||||
|
assert logs.length > 30
|
||||||
|
logs.each do |log|
|
||||||
|
assert_include [Whisper::LOG_LEVEL_DEBUG, Whisper::LOG_LEVEL_INFO, Whisper::LOG_LEVEL_WARN], log[0]
|
||||||
|
assert_same user_data, log[2]
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_log_suppress
|
||||||
|
stderr = $stderr
|
||||||
|
Whisper.log_set ->(level, buffer, user_data) {
|
||||||
|
# do nothing
|
||||||
|
}, nil
|
||||||
|
dev = StringIO.new("")
|
||||||
|
$stderr = dev
|
||||||
|
Whisper::Context.new("base.en")
|
||||||
|
assert_empty dev.string
|
||||||
|
ensure
|
||||||
|
$stderr = stderr
|
||||||
|
end
|
||||||
|
|
||||||
|
sub_test_case "full" do
|
||||||
|
def setup
|
||||||
|
super
|
||||||
|
@whisper = Whisper::Context.new("base.en")
|
||||||
|
@samples = File.read(AUDIO, nil, 78).unpack("s<*").collect {|i| i.to_f / 2**15}
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_full
|
||||||
|
@whisper.full(@params, @samples, @samples.length)
|
||||||
|
|
||||||
|
assert_equal 1, @whisper.full_n_segments
|
||||||
|
assert_match /ask not what your country can do for you, ask what you can do for your country/, @whisper.each_segment.first.text
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_full_without_length
|
||||||
|
@whisper.full(@params, @samples)
|
||||||
|
|
||||||
|
assert_equal 1, @whisper.full_n_segments
|
||||||
|
assert_match /ask not what your country can do for you, ask what you can do for your country/, @whisper.each_segment.first.text
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_full_enumerator
|
||||||
|
samples = @samples.each
|
||||||
|
@whisper.full(@params, samples, @samples.length)
|
||||||
|
|
||||||
|
assert_equal 1, @whisper.full_n_segments
|
||||||
|
assert_match /ask not what your country can do for you, ask what you can do for your country/, @whisper.each_segment.first.text
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_full_enumerator_without_length
|
||||||
|
samples = @samples.each
|
||||||
|
assert_raise ArgumentError do
|
||||||
|
@whisper.full(@params, samples)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_full_enumerator_with_too_large_length
|
||||||
|
samples = @samples.each.take(10).to_enum
|
||||||
|
assert_raise StopIteration do
|
||||||
|
@whisper.full(@params, samples, 11)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_full_with_memory_view
|
||||||
|
samples = JFKReader.new(AUDIO)
|
||||||
|
@whisper.full(@params, samples)
|
||||||
|
|
||||||
|
assert_equal 1, @whisper.full_n_segments
|
||||||
|
assert_match /ask not what your country can do for you, ask what you can do for your country/, @whisper.each_segment.first.text
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_full_parallel
|
||||||
|
@whisper.full_parallel(@params, @samples, @samples.length, Etc.nprocessors)
|
||||||
|
|
||||||
|
assert_equal Etc.nprocessors, @whisper.full_n_segments
|
||||||
|
text = @whisper.each_segment.collect(&:text).join
|
||||||
|
assert_match /ask what you can do/i, text
|
||||||
|
assert_match /for your country/i, text
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_full_parallel_with_memory_view
|
||||||
|
samples = JFKReader.new(AUDIO)
|
||||||
|
@whisper.full_parallel(@params, samples, nil, Etc.nprocessors)
|
||||||
|
|
||||||
|
assert_equal Etc.nprocessors, @whisper.full_n_segments
|
||||||
|
text = @whisper.each_segment.collect(&:text).join
|
||||||
|
assert_match /ask what you can do/i, text
|
||||||
|
assert_match /for your country/i, text
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_full_parallel_without_length_and_n_processors
|
||||||
|
@whisper.full_parallel(@params, @samples)
|
||||||
|
|
||||||
|
assert_equal 1, @whisper.full_n_segments
|
||||||
|
text = @whisper.each_segment.collect(&:text).join
|
||||||
|
assert_match /ask what you can do/i, text
|
||||||
|
assert_match /for your country/i, text
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_full_parallel_without_length
|
||||||
|
@whisper.full_parallel(@params, @samples, nil, Etc.nprocessors)
|
||||||
|
|
||||||
|
assert_equal Etc.nprocessors, @whisper.full_n_segments
|
||||||
|
text = @whisper.each_segment.collect(&:text).join
|
||||||
|
assert_match /ask what you can do/i, text
|
||||||
|
assert_match /for your country/i, text
|
||||||
|
end
|
||||||
|
|
||||||
|
def test_full_parallel_without_n_processors
|
||||||
|
@whisper.full_parallel(@params, @samples, @samples.length)
|
||||||
|
|
||||||
|
assert_equal 1, @whisper.full_n_segments
|
||||||
|
text = @whisper.each_segment.collect(&:text).join
|
||||||
|
assert_match /ask what you can do/i, text
|
||||||
|
assert_match /for your country/i, text
|
||||||
|
end
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
@ -1,28 +1,36 @@
|
|||||||
|
require_relative "extsources"
|
||||||
|
|
||||||
Gem::Specification.new do |s|
|
Gem::Specification.new do |s|
|
||||||
s.name = "whispercpp"
|
s.name = "whispercpp"
|
||||||
s.authors = ["Georgi Gerganov", "Todd A. Fisher"]
|
s.authors = ["Georgi Gerganov", "Todd A. Fisher"]
|
||||||
s.version = '1.3.0'
|
s.version = '1.3.1'
|
||||||
s.date = '2024-05-14'
|
s.date = '2024-12-19'
|
||||||
s.description = %q{High-performance inference of OpenAI's Whisper automatic speech recognition (ASR) model via Ruby}
|
s.description = %q{High-performance inference of OpenAI's Whisper automatic speech recognition (ASR) model via Ruby}
|
||||||
s.email = 'todd.fisher@gmail.com'
|
s.email = 'todd.fisher@gmail.com'
|
||||||
s.extra_rdoc_files = ['LICENSE', 'README.md']
|
s.extra_rdoc_files = ['LICENSE', 'README.md']
|
||||||
|
|
||||||
s.files = ["LICENSE", "README.md", "Rakefile", "ext/extconf.rb", "ext/ggml.c", "ext/ruby_whisper.cpp", "ext/whisper.cpp", "ext/dr_wav.h", "ext/ggml.h", "ext/ruby_whisper.h", "ext/whisper.h"]
|
|
||||||
|
|
||||||
#### Load-time details
|
s.files = `git ls-files . -z`.split("\x0") +
|
||||||
s.require_paths = ['lib','ext']
|
EXTSOURCES.collect {|file|
|
||||||
|
basename = File.basename(file)
|
||||||
|
if s.extra_rdoc_files.include?(basename)
|
||||||
|
basename
|
||||||
|
else
|
||||||
|
file.sub("../..", "ext")
|
||||||
|
end
|
||||||
|
}
|
||||||
|
|
||||||
s.summary = %q{Ruby whisper.cpp bindings}
|
s.summary = %q{Ruby whisper.cpp bindings}
|
||||||
s.test_files = ["tests/test_whisper.rb"]
|
s.test_files = s.files.select {|file| file.start_with? "tests/"}
|
||||||
|
|
||||||
s.extensions << 'ext/extconf.rb'
|
s.extensions << 'ext/extconf.rb'
|
||||||
|
s.required_ruby_version = '>= 3.1.0'
|
||||||
|
|
||||||
#### Documentation and testing.
|
#### Documentation and testing.
|
||||||
s.homepage = 'https://github.com/ggerganov/whisper.cpp'
|
s.homepage = 'https://github.com/ggerganov/whisper.cpp'
|
||||||
s.rdoc_options = ['--main', '../../README.md']
|
s.rdoc_options = ['--main', 'README.md']
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
s.platform = Gem::Platform::RUBY
|
s.platform = Gem::Platform::RUBY
|
||||||
|
|
||||||
s.licenses = ['MIT']
|
s.licenses = ['MIT']
|
||||||
end
|
end
|
||||||
|
@ -1,54 +0,0 @@
|
|||||||
# Add new build types
|
|
||||||
|
|
||||||
# ReleaseGG - Release with enabled asserts
|
|
||||||
|
|
||||||
SET(CMAKE_CXX_FLAGS_RELEASEGG
|
|
||||||
"-O3"
|
|
||||||
CACHE STRING "Flags used by the c++ compiler during release builds with enabled asserts."
|
|
||||||
FORCE )
|
|
||||||
SET(CMAKE_C_FLAGS_RELEASEGG
|
|
||||||
"-O3"
|
|
||||||
CACHE STRING "Flags used by the compiler during release builds with enabled asserts."
|
|
||||||
FORCE )
|
|
||||||
SET(CMAKE_EXE_LINKER_FLAGS_RELEASEGG
|
|
||||||
""
|
|
||||||
CACHE STRING "Flags used for linking binaries during release builds with enabled asserts."
|
|
||||||
FORCE )
|
|
||||||
SET(CMAKE_SHARED_LINKER_FLAGS_RELEASEGG
|
|
||||||
""
|
|
||||||
CACHE STRING "Flags used by the shared libraries linker during release builds with enabled asserts."
|
|
||||||
FORCE )
|
|
||||||
MARK_AS_ADVANCED(
|
|
||||||
CMAKE_CXX_FLAGS_RELEASEGG
|
|
||||||
CMAKE_C_FLAGS_RELEASEGG
|
|
||||||
CMAKE_EXE_LINKER_FLAGS_RELEASEGG
|
|
||||||
CMAKE_SHARED_LINKER_FLAGS_RELEASEGG )
|
|
||||||
|
|
||||||
# RelWithDebInfoGG - RelWithDebInfo with enabled asserts
|
|
||||||
|
|
||||||
SET(CMAKE_CXX_FLAGS_RELWITHDEBINFOGG
|
|
||||||
"-O2 -g"
|
|
||||||
CACHE STRING "Flags used by the c++ compiler during release builds with debug symbols and enabled asserts."
|
|
||||||
FORCE )
|
|
||||||
SET(CMAKE_C_FLAGS_RELWITHDEBINFOGG
|
|
||||||
"-O2 -g"
|
|
||||||
CACHE STRING "Flags used by the compiler during release builds with debug symbols and enabled asserts."
|
|
||||||
FORCE )
|
|
||||||
SET(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFOGG
|
|
||||||
""
|
|
||||||
CACHE STRING "Flags used for linking binaries during release builds with debug symbols and enabled asserts."
|
|
||||||
FORCE )
|
|
||||||
SET(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFOGG
|
|
||||||
""
|
|
||||||
CACHE STRING "Flags used by the shared libraries linker during release builds with debug symbols and enabled asserts."
|
|
||||||
FORCE )
|
|
||||||
MARK_AS_ADVANCED(
|
|
||||||
CMAKE_CXX_FLAGS_RELWITHDEBINFOGG
|
|
||||||
CMAKE_C_FLAGS_RELWITHDEBINFOGG
|
|
||||||
CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFOGG
|
|
||||||
CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFOGG )
|
|
||||||
|
|
||||||
if (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE)
|
|
||||||
set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
|
|
||||||
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo" "ReleaseGG" "RelWithDebInfoGG")
|
|
||||||
endif()
|
|
@ -13,5 +13,4 @@ set_target_properties(${TARGET}
|
|||||||
PROPERTIES
|
PROPERTIES
|
||||||
EXPORT_COMPILE_COMMANDS ON
|
EXPORT_COMPILE_COMMANDS ON
|
||||||
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin"
|
RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin"
|
||||||
INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib"
|
|
||||||
)
|
)
|
||||||
|
@ -36,7 +36,7 @@ include(FindPackageHandleStandardArgs)
|
|||||||
|
|
||||||
# The default components were taken from a survey over other FindFFMPEG.cmake files
|
# The default components were taken from a survey over other FindFFMPEG.cmake files
|
||||||
if (NOT FFmpeg_FIND_COMPONENTS)
|
if (NOT FFmpeg_FIND_COMPONENTS)
|
||||||
set(FFmpeg_FIND_COMPONENTS AVFORMAT AVCODEC AVUTIL SWRESAMPLE)
|
set(FFmpeg_FIND_COMPONENTS AVFORMAT AVCODEC AVUTIL SWRESAMPLE)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
#
|
#
|
||||||
@ -84,7 +84,7 @@ macro(find_component _component _pkgconfig _library _header)
|
|||||||
|
|
||||||
# CMake's default is to search first for shared libraries and then for static libraries.
|
# CMake's default is to search first for shared libraries and then for static libraries.
|
||||||
# Todo later: add option to prefer static libs over dynamic:
|
# Todo later: add option to prefer static libs over dynamic:
|
||||||
find_library(${_component}_LIBRARIES NAMES ${_library} lib${_library}.a
|
find_library(${_component}_LIBRARIES NAMES ${_library} lib${_library}.a
|
||||||
HINTS
|
HINTS
|
||||||
${PC_${_component}_LIBDIR}
|
${PC_${_component}_LIBDIR}
|
||||||
${PC_${_component}_LIBRARY_DIRS}
|
${PC_${_component}_LIBRARY_DIRS}
|
||||||
|
58
cmake/build-info.cmake
Normal file
58
cmake/build-info.cmake
Normal file
@ -0,0 +1,58 @@
|
|||||||
|
set(BUILD_NUMBER 0)
|
||||||
|
set(BUILD_COMMIT "unknown")
|
||||||
|
set(BUILD_COMPILER "unknown")
|
||||||
|
set(BUILD_TARGET "unknown")
|
||||||
|
|
||||||
|
# Look for git
|
||||||
|
find_package(Git)
|
||||||
|
if(NOT Git_FOUND)
|
||||||
|
find_program(GIT_EXECUTABLE NAMES git git.exe)
|
||||||
|
if(GIT_EXECUTABLE)
|
||||||
|
set(Git_FOUND TRUE)
|
||||||
|
message(STATUS "Found Git: ${GIT_EXECUTABLE}")
|
||||||
|
else()
|
||||||
|
message(WARNING "Git not found. Build info will not be accurate.")
|
||||||
|
endif()
|
||||||
|
endif()
|
||||||
|
|
||||||
|
# Get the commit count and hash
|
||||||
|
if(Git_FOUND)
|
||||||
|
execute_process(
|
||||||
|
COMMAND ${GIT_EXECUTABLE} rev-parse --short HEAD
|
||||||
|
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
|
||||||
|
OUTPUT_VARIABLE HEAD
|
||||||
|
OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||||
|
RESULT_VARIABLE RES
|
||||||
|
)
|
||||||
|
if (RES EQUAL 0)
|
||||||
|
set(BUILD_COMMIT ${HEAD})
|
||||||
|
endif()
|
||||||
|
execute_process(
|
||||||
|
COMMAND ${GIT_EXECUTABLE} rev-list --count HEAD
|
||||||
|
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
|
||||||
|
OUTPUT_VARIABLE COUNT
|
||||||
|
OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||||
|
RESULT_VARIABLE RES
|
||||||
|
)
|
||||||
|
if (RES EQUAL 0)
|
||||||
|
set(BUILD_NUMBER ${COUNT})
|
||||||
|
endif()
|
||||||
|
endif()
|
||||||
|
|
||||||
|
if(MSVC)
|
||||||
|
set(BUILD_COMPILER "${CMAKE_C_COMPILER_ID} ${CMAKE_C_COMPILER_VERSION}")
|
||||||
|
set(BUILD_TARGET ${CMAKE_VS_PLATFORM_NAME})
|
||||||
|
else()
|
||||||
|
execute_process(
|
||||||
|
COMMAND sh -c "$@ --version | head -1" _ ${CMAKE_C_COMPILER}
|
||||||
|
OUTPUT_VARIABLE OUT
|
||||||
|
OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||||
|
)
|
||||||
|
set(BUILD_COMPILER ${OUT})
|
||||||
|
execute_process(
|
||||||
|
COMMAND ${CMAKE_C_COMPILER} -dumpmachine
|
||||||
|
OUTPUT_VARIABLE OUT
|
||||||
|
OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||||
|
)
|
||||||
|
set(BUILD_TARGET ${OUT})
|
||||||
|
endif()
|
65
cmake/whisper-config.cmake.in
Normal file
65
cmake/whisper-config.cmake.in
Normal file
@ -0,0 +1,65 @@
|
|||||||
|
set(WHISPER_VERSION @WHISPER_INSTALL_VERSION@)
|
||||||
|
set(WHISPER_BUILD_COMMIT @WHISPER_BUILD_COMMIT@)
|
||||||
|
set(WHISPER_BUILD_NUMBER @WHISPER_BUILD_NUMBER@)
|
||||||
|
set(WHISPER_SHARED_LIB @BUILD_SHARED_LIBS@)
|
||||||
|
|
||||||
|
set(GGML_BLAS @GGML_BLAS@)
|
||||||
|
set(GGML_CUDA @GGML_CUDA@)
|
||||||
|
set(GGML_METAL @GGML_METAL@)
|
||||||
|
set(GGML_HIPBLAS @GGML_HIPBLAS@)
|
||||||
|
set(GGML_ACCELERATE @GGML_ACCELERATE@)
|
||||||
|
|
||||||
|
@PACKAGE_INIT@
|
||||||
|
|
||||||
|
set_and_check(WHISPER_INCLUDE_DIR "@PACKAGE_WHISPER_INCLUDE_INSTALL_DIR@")
|
||||||
|
set_and_check(WHISPER_LIB_DIR "@PACKAGE_WHISPER_LIB_INSTALL_DIR@")
|
||||||
|
set_and_check(WHISPER_BIN_DIR "@PACKAGE_WHISPER_BIN_INSTALL_DIR@")
|
||||||
|
|
||||||
|
# Ensure transient dependencies satisfied
|
||||||
|
|
||||||
|
find_package(Threads REQUIRED)
|
||||||
|
|
||||||
|
if (APPLE AND GGML_ACCELERATE)
|
||||||
|
find_library(ACCELERATE_FRAMEWORK Accelerate REQUIRED)
|
||||||
|
endif()
|
||||||
|
|
||||||
|
if (GGML_BLAS)
|
||||||
|
find_package(BLAS REQUIRED)
|
||||||
|
endif()
|
||||||
|
|
||||||
|
if (GGML_CUDA)
|
||||||
|
find_package(CUDAToolkit REQUIRED)
|
||||||
|
endif()
|
||||||
|
|
||||||
|
if (GGML_METAL)
|
||||||
|
find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
|
||||||
|
find_library(METAL_FRAMEWORK Metal REQUIRED)
|
||||||
|
find_library(METALKIT_FRAMEWORK MetalKit REQUIRED)
|
||||||
|
endif()
|
||||||
|
|
||||||
|
if (GGML_HIPBLAS)
|
||||||
|
find_package(hip REQUIRED)
|
||||||
|
find_package(hipblas REQUIRED)
|
||||||
|
find_package(rocblas REQUIRED)
|
||||||
|
endif()
|
||||||
|
|
||||||
|
find_library(whisper_LIBRARY whisper
|
||||||
|
REQUIRED
|
||||||
|
HINTS ${WHISPER_LIB_DIR})
|
||||||
|
|
||||||
|
set(_whisper_link_deps "Threads::Threads" "@WHISPER_EXTRA_LIBS@")
|
||||||
|
set(_whisper_transient_defines "@WHISPER_TRANSIENT_DEFINES@")
|
||||||
|
|
||||||
|
add_library(whisper UNKNOWN IMPORTED)
|
||||||
|
|
||||||
|
set_target_properties(whisper
|
||||||
|
PROPERTIES
|
||||||
|
INTERFACE_INCLUDE_DIRECTORIES "${WHISPER_INCLUDE_DIR}"
|
||||||
|
INTERFACE_LINK_LIBRARIES "${_whisper_link_deps}"
|
||||||
|
INTERFACE_COMPILE_DEFINITIONS "${_whisper_transient_defines}"
|
||||||
|
IMPORTED_LINK_INTERFACE_LANGUAGES "CXX"
|
||||||
|
IMPORTED_LOCATION "${whisper_LIBRARY}"
|
||||||
|
INTERFACE_COMPILE_FEATURES cxx_std_11
|
||||||
|
POSITION_INDEPENDENT_CODE ON )
|
||||||
|
|
||||||
|
check_required_components(whisper)
|
10
cmake/whisper.pc.in
Normal file
10
cmake/whisper.pc.in
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
prefix=@CMAKE_INSTALL_PREFIX@
|
||||||
|
exec_prefix=${prefix}
|
||||||
|
libdir=${exec_prefix}/lib
|
||||||
|
includedir=${prefix}/include
|
||||||
|
|
||||||
|
Name: whisper
|
||||||
|
Description: Port of OpenAI's Whisper model in C/C++
|
||||||
|
Version: @PROJECT_VERSION@
|
||||||
|
Libs: -L${libdir} -lggml -lggml-base -lwhisper
|
||||||
|
Cflags: -I${includedir}
|
@ -11,7 +11,7 @@ if (WHISPER_SDL2)
|
|||||||
string(STRIP "${SDL2_LIBRARIES}" SDL2_LIBRARIES)
|
string(STRIP "${SDL2_LIBRARIES}" SDL2_LIBRARIES)
|
||||||
|
|
||||||
message(STATUS "SDL2_INCLUDE_DIRS = ${SDL2_INCLUDE_DIRS}")
|
message(STATUS "SDL2_INCLUDE_DIRS = ${SDL2_INCLUDE_DIRS}")
|
||||||
message(STATUS "SDL2_LIBRARIES = ${SDL2_LIBRARIES}")
|
message(STATUS "SDL2_LIBRARIES = ${SDL2_LIBRARIES}")
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if (WHISPER_CLBLAST)
|
if (WHISPER_CLBLAST)
|
||||||
@ -22,10 +22,35 @@ endif()
|
|||||||
|
|
||||||
set(TARGET common)
|
set(TARGET common)
|
||||||
|
|
||||||
|
unset(COMMON_EXTRA_LIBS)
|
||||||
|
|
||||||
if (WHISPER_FFMPEG)
|
if (WHISPER_FFMPEG)
|
||||||
|
# As of cmake 3.27, there is no official cmake support for FindFFmpeg.
|
||||||
|
# Consequnelty we added a FindFFmpeg.cmake script the cmake subfolder:
|
||||||
|
# whisper.cpp does not need the full ffmpeg libs, just AVFORMAT AVCODEC AVUTIL SWRESAMPLE
|
||||||
|
# libswresample performs highly optimized audio resampling, rematrixing and sample format conversion operations
|
||||||
|
# libavcodec provides a generic encoding/decoding framework and contains multiple decoders and encoders for audio, video and subtitle streams, and several bitstream filters.
|
||||||
|
# libavformat provides a generic framework for multiplexing and demultiplexing (muxing and demuxing) audio, video and subtitle streams.
|
||||||
|
find_package(FFmpeg REQUIRED)
|
||||||
|
|
||||||
|
if (NOT ${FFMPEG_FOUND})
|
||||||
|
message(FATAL_ERROR "Cannot find ffmpeg libs/headers")
|
||||||
|
endif()
|
||||||
|
|
||||||
|
message(STATUS "Found ffmpeg libs: ${FFMPEG_LIBRARIES}")
|
||||||
|
message(STATUS "Found ffmpeg headers in: ${FFMPEG_INCLUDE_DIRS}")
|
||||||
|
message(STATUS "ffmpeg definitions: ${FFMPEG_DEFINITIONS}")
|
||||||
|
message(STATUS "Found avformat ${AVFORMAT_VERSION}")
|
||||||
|
|
||||||
|
include_directories(${FFMPEG_INCLUDE_DIRS})
|
||||||
|
add_compile_definitions(WHISPER_FFMPEG)
|
||||||
|
|
||||||
|
list(APPEND COMMON_EXTRA_LIBS ${FFMPEG_LIBRARIES})
|
||||||
|
|
||||||
set(COMMON_SOURCES_FFMPEG ffmpeg-transcode.cpp)
|
set(COMMON_SOURCES_FFMPEG ffmpeg-transcode.cpp)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
|
||||||
add_library(${TARGET} STATIC
|
add_library(${TARGET} STATIC
|
||||||
common.h
|
common.h
|
||||||
common.cpp
|
common.cpp
|
||||||
@ -38,7 +63,7 @@ add_library(${TARGET} STATIC
|
|||||||
|
|
||||||
include(DefaultTargetOptions)
|
include(DefaultTargetOptions)
|
||||||
|
|
||||||
target_link_libraries(${TARGET} PRIVATE whisper)
|
target_link_libraries(${TARGET} PRIVATE whisper ${COMMON_EXTRA_LIBS})
|
||||||
|
|
||||||
set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
|
set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
|
||||||
set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
|
set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
|
||||||
@ -55,8 +80,8 @@ if (WHISPER_SDL2)
|
|||||||
|
|
||||||
include(DefaultTargetOptions)
|
include(DefaultTargetOptions)
|
||||||
|
|
||||||
target_include_directories(${TARGET} PUBLIC ${SDL2_INCLUDE_DIRS})
|
target_include_directories(${TARGET} PUBLIC ${SDL2_INCLUDE_DIRS})
|
||||||
target_link_libraries(${TARGET} PRIVATE ${SDL2_LIBRARIES})
|
target_link_libraries (${TARGET} PRIVATE ${SDL2_LIBRARIES})
|
||||||
|
|
||||||
set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
|
set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
|
||||||
set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
|
set_target_properties(${TARGET} PROPERTIES FOLDER "libs")
|
||||||
@ -72,50 +97,29 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR})
|
|||||||
|
|
||||||
if (EMSCRIPTEN)
|
if (EMSCRIPTEN)
|
||||||
add_subdirectory(whisper.wasm)
|
add_subdirectory(whisper.wasm)
|
||||||
set_target_properties(libmain PROPERTIES FOLDER "libs")
|
|
||||||
add_subdirectory(stream.wasm)
|
add_subdirectory(stream.wasm)
|
||||||
set_target_properties(libstream PROPERTIES FOLDER "libs")
|
|
||||||
add_subdirectory(command.wasm)
|
add_subdirectory(command.wasm)
|
||||||
set_target_properties(libcommand PROPERTIES FOLDER "libs")
|
|
||||||
add_subdirectory(talk.wasm)
|
|
||||||
set_target_properties(libtalk PROPERTIES FOLDER "libs")
|
|
||||||
add_subdirectory(bench.wasm)
|
add_subdirectory(bench.wasm)
|
||||||
set_target_properties(libbench PROPERTIES FOLDER "libs")
|
|
||||||
elseif(CMAKE_JS_VERSION)
|
elseif(CMAKE_JS_VERSION)
|
||||||
add_subdirectory(addon.node)
|
add_subdirectory(addon.node)
|
||||||
set_target_properties(addon.node PROPERTIES FOLDER "examples")
|
|
||||||
else()
|
else()
|
||||||
add_subdirectory(main)
|
add_subdirectory(cli)
|
||||||
set_target_properties(main PROPERTIES FOLDER "examples")
|
|
||||||
if (WHISPER_SDL2)
|
|
||||||
add_subdirectory(stream)
|
|
||||||
set_target_properties(stream PROPERTIES FOLDER "examples")
|
|
||||||
endif (WHISPER_SDL2)
|
|
||||||
add_subdirectory(server)
|
|
||||||
set_target_properties(server PROPERTIES FOLDER "examples")
|
|
||||||
if (WHISPER_SDL2)
|
|
||||||
add_subdirectory(command)
|
|
||||||
set_target_properties(command PROPERTIES FOLDER "examples")
|
|
||||||
endif (WHISPER_SDL2)
|
|
||||||
add_subdirectory(bench)
|
add_subdirectory(bench)
|
||||||
set_target_properties(bench PROPERTIES FOLDER "examples")
|
add_subdirectory(server)
|
||||||
add_subdirectory(quantize)
|
add_subdirectory(quantize)
|
||||||
set_target_properties(quantize PROPERTIES FOLDER "examples")
|
if (WHISPER_SDL2)
|
||||||
if (WHISPER_SDL2)
|
add_subdirectory(stream)
|
||||||
add_subdirectory(talk)
|
add_subdirectory(command)
|
||||||
set_target_properties(talk PROPERTIES FOLDER "examples")
|
add_subdirectory(talk-llama)
|
||||||
add_subdirectory(talk-llama)
|
add_subdirectory(lsp)
|
||||||
set_target_properties(talk-llama PROPERTIES FOLDER "examples")
|
if (GGML_SYCL)
|
||||||
add_subdirectory(lsp)
|
add_subdirectory(sycl)
|
||||||
set_target_properties(lsp PROPERTIES FOLDER "examples")
|
endif()
|
||||||
if (LLAMA_SYCL)
|
endif (WHISPER_SDL2)
|
||||||
add_subdirectory(sycl)
|
|
||||||
set_target_properties(sycl PROPERTIES FOLDER "examples")
|
add_subdirectory(deprecation-warning)
|
||||||
endif()
|
|
||||||
endif (WHISPER_SDL2)
|
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if (WHISPER_SDL2)
|
if (WHISPER_SDL2)
|
||||||
add_subdirectory(wchess)
|
add_subdirectory(wchess)
|
||||||
set_target_properties(wchess PROPERTIES FOLDER "examples")
|
|
||||||
endif (WHISPER_SDL2)
|
endif (WHISPER_SDL2)
|
||||||
|
@ -1,6 +1,8 @@
|
|||||||
set(TARGET bench)
|
set(TARGET whisper-bench)
|
||||||
add_executable(${TARGET} bench.cpp)
|
add_executable(${TARGET} bench.cpp)
|
||||||
|
|
||||||
include(DefaultTargetOptions)
|
include(DefaultTargetOptions)
|
||||||
|
|
||||||
target_link_libraries(${TARGET} PRIVATE whisper ${CMAKE_THREAD_LIBS_INIT})
|
target_link_libraries(${TARGET} PRIVATE whisper ${CMAKE_THREAD_LIBS_INIT})
|
||||||
|
|
||||||
|
install(TARGETS ${TARGET} RUNTIME)
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# bench
|
# whisper.cpp/examples/bench
|
||||||
|
|
||||||
A very basic tool for benchmarking the inference performance on your device. The tool simply runs the Encoder part of
|
A very basic tool for benchmarking the inference performance on your device. The tool simply runs the Encoder part of
|
||||||
the transformer on some random audio data and records the execution time. This way we can have an objective comparison
|
the transformer on some random audio data and records the execution time. This way we can have an objective comparison
|
||||||
@ -7,11 +7,8 @@ of the performance of the model for various setups.
|
|||||||
Benchmark results are tracked in the following Github issue: https://github.com/ggerganov/whisper.cpp/issues/89
|
Benchmark results are tracked in the following Github issue: https://github.com/ggerganov/whisper.cpp/issues/89
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# build the bench tool
|
# run the bench too on the small.en model using 4 threads
|
||||||
$ make bench
|
$ ./build/bin/whisper-bench -m ./models/ggml-small.en.bin -t 4
|
||||||
|
|
||||||
# run it on the small.en model using 4 threads
|
|
||||||
$ ./bench -m ./models/ggml-small.en.bin -t 4
|
|
||||||
|
|
||||||
whisper_model_load: loading model from './models/ggml-small.en.bin'
|
whisper_model_load: loading model from './models/ggml-small.en.bin'
|
||||||
whisper_model_load: n_vocab = 51864
|
whisper_model_load: n_vocab = 51864
|
||||||
|
@ -18,7 +18,7 @@ struct whisper_params {
|
|||||||
|
|
||||||
void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
|
void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
|
||||||
|
|
||||||
bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
|
static bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
|
||||||
for (int i = 1; i < argc; i++) {
|
for (int i = 1; i < argc; i++) {
|
||||||
std::string arg = argv[i];
|
std::string arg = argv[i];
|
||||||
|
|
||||||
@ -58,7 +58,7 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
|
|||||||
fprintf(stderr, "\n");
|
fprintf(stderr, "\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
int whisper_bench_full(const whisper_params & params) {
|
static int whisper_bench_full(const whisper_params & params) {
|
||||||
// whisper init
|
// whisper init
|
||||||
|
|
||||||
struct whisper_context_params cparams = whisper_context_default_params();
|
struct whisper_context_params cparams = whisper_context_default_params();
|
||||||
|
@ -1,6 +1,8 @@
|
|||||||
set(TARGET main)
|
set(TARGET whisper-cli)
|
||||||
add_executable(${TARGET} main.cpp)
|
add_executable(${TARGET} cli.cpp)
|
||||||
|
|
||||||
include(DefaultTargetOptions)
|
include(DefaultTargetOptions)
|
||||||
|
|
||||||
target_link_libraries(${TARGET} PRIVATE common whisper ${FFMPEG_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
|
target_link_libraries(${TARGET} PRIVATE common whisper ${FFMPEG_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
|
||||||
|
|
||||||
|
install(TARGETS ${TARGET} RUNTIME)
|
@ -1,12 +1,12 @@
|
|||||||
# main
|
# whisper.cpp/examples/cli
|
||||||
|
|
||||||
This is the main example demonstrating most of the functionality of the Whisper model.
|
This is the main example demonstrating most of the functionality of the Whisper model.
|
||||||
It can be used as a reference for using the `whisper.cpp` library in other projects.
|
It can be used as a reference for using the `whisper.cpp` library in other projects.
|
||||||
|
|
||||||
```
|
```
|
||||||
./main -h
|
./build/bin/whisper-cli -h
|
||||||
|
|
||||||
usage: ./main [options] file0.wav file1.wav ...
|
usage: ./build-pkg/bin/whisper-cli [options] file0.wav file1.wav ...
|
||||||
|
|
||||||
options:
|
options:
|
||||||
-h, --help [default] show this help message and exit
|
-h, --help [default] show this help message and exit
|
||||||
@ -20,9 +20,12 @@ options:
|
|||||||
-sow, --split-on-word [false ] split on word rather than on token
|
-sow, --split-on-word [false ] split on word rather than on token
|
||||||
-bo N, --best-of N [5 ] number of best candidates to keep
|
-bo N, --best-of N [5 ] number of best candidates to keep
|
||||||
-bs N, --beam-size N [5 ] beam size for beam search
|
-bs N, --beam-size N [5 ] beam size for beam search
|
||||||
|
-ac N, --audio-ctx N [0 ] audio context size (0 - all)
|
||||||
-wt N, --word-thold N [0.01 ] word timestamp probability threshold
|
-wt N, --word-thold N [0.01 ] word timestamp probability threshold
|
||||||
-et N, --entropy-thold N [2.40 ] entropy threshold for decoder fail
|
-et N, --entropy-thold N [2.40 ] entropy threshold for decoder fail
|
||||||
-lpt N, --logprob-thold N [-1.00 ] log probability threshold for decoder fail
|
-lpt N, --logprob-thold N [-1.00 ] log probability threshold for decoder fail
|
||||||
|
-tp, --temperature N [0.00 ] The sampling temperature, between 0 and 1
|
||||||
|
-tpi, --temperature-inc N [0.20 ] The increment of temperature, between 0 and 1
|
||||||
-debug, --debug-mode [false ] enable debug mode (eg. dump log_mel)
|
-debug, --debug-mode [false ] enable debug mode (eg. dump log_mel)
|
||||||
-tr, --translate [false ] translate from source language to english
|
-tr, --translate [false ] translate from source language to english
|
||||||
-di, --diarize [false ] stereo audio diarization
|
-di, --diarize [false ] stereo audio diarization
|
||||||
@ -38,16 +41,23 @@ options:
|
|||||||
-oj, --output-json [false ] output result in a JSON file
|
-oj, --output-json [false ] output result in a JSON file
|
||||||
-ojf, --output-json-full [false ] include more information in the JSON file
|
-ojf, --output-json-full [false ] include more information in the JSON file
|
||||||
-of FNAME, --output-file FNAME [ ] output file path (without file extension)
|
-of FNAME, --output-file FNAME [ ] output file path (without file extension)
|
||||||
|
-np, --no-prints [false ] do not print anything other than the results
|
||||||
-ps, --print-special [false ] print special tokens
|
-ps, --print-special [false ] print special tokens
|
||||||
-pc, --print-colors [false ] print colors
|
-pc, --print-colors [false ] print colors
|
||||||
-pp, --print-progress [false ] print progress
|
-pp, --print-progress [false ] print progress
|
||||||
-nt, --no-timestamps [false ] do not print timestamps
|
-nt, --no-timestamps [false ] do not print timestamps
|
||||||
-l LANG, --language LANG [en ] spoken language ('auto' for auto-detect)
|
-l LANG, --language LANG [en ] spoken language ('auto' for auto-detect)
|
||||||
-dl, --detect-language [false ] exit after automatically detecting language
|
-dl, --detect-language [false ] exit after automatically detecting language
|
||||||
--prompt PROMPT [ ] initial prompt
|
--prompt PROMPT [ ] initial prompt (max n_text_ctx/2 tokens)
|
||||||
-m FNAME, --model FNAME [models/ggml-base.en.bin] model path
|
-m FNAME, --model FNAME [models/ggml-base.en.bin] model path
|
||||||
-f FNAME, --file FNAME [ ] input WAV file path
|
-f FNAME, --file FNAME [ ] input WAV file path
|
||||||
-oved D, --ov-e-device DNAME [CPU ] the OpenVINO device used for encode inference
|
-oved D, --ov-e-device DNAME [CPU ] the OpenVINO device used for encode inference
|
||||||
|
-dtw MODEL --dtw MODEL [ ] compute token-level timestamps
|
||||||
-ls, --log-score [false ] log best decoder scores of tokens
|
-ls, --log-score [false ] log best decoder scores of tokens
|
||||||
-ng, --no-gpu [false ] disable GPU
|
-ng, --no-gpu [false ] disable GPU
|
||||||
|
-fa, --flash-attn [false ] flash attention
|
||||||
|
--suppress-regex REGEX [ ] regular expression matching tokens to suppress
|
||||||
|
--grammar GRAMMAR [ ] GBNF grammar to guide decoding
|
||||||
|
--grammar-rule RULE [ ] top-level GBNF grammar rule name
|
||||||
|
--grammar-penalty N [100.0 ] scales down logits of nongrammar tokens
|
||||||
```
|
```
|
@ -17,7 +17,7 @@
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
// helper function to replace substrings
|
// helper function to replace substrings
|
||||||
void replace_all(std::string & s, const std::string & search, const std::string & replace) {
|
static void replace_all(std::string & s, const std::string & search, const std::string & replace) {
|
||||||
for (size_t pos = 0; ; pos += replace.length()) {
|
for (size_t pos = 0; ; pos += replace.length()) {
|
||||||
pos = s.find(search, pos);
|
pos = s.find(search, pos);
|
||||||
if (pos == std::string::npos) break;
|
if (pos == std::string::npos) break;
|
||||||
@ -43,6 +43,7 @@ struct whisper_params {
|
|||||||
float word_thold = 0.01f;
|
float word_thold = 0.01f;
|
||||||
float entropy_thold = 2.40f;
|
float entropy_thold = 2.40f;
|
||||||
float logprob_thold = -1.00f;
|
float logprob_thold = -1.00f;
|
||||||
|
float no_speech_thold = 0.6f;
|
||||||
float grammar_penalty = 100.0f;
|
float grammar_penalty = 100.0f;
|
||||||
float temperature = 0.0f;
|
float temperature = 0.0f;
|
||||||
float temperature_inc = 0.2f;
|
float temperature_inc = 0.2f;
|
||||||
@ -70,6 +71,7 @@ struct whisper_params {
|
|||||||
bool log_score = false;
|
bool log_score = false;
|
||||||
bool use_gpu = true;
|
bool use_gpu = true;
|
||||||
bool flash_attn = false;
|
bool flash_attn = false;
|
||||||
|
bool suppress_nst = false;
|
||||||
|
|
||||||
std::string language = "en";
|
std::string language = "en";
|
||||||
std::string prompt;
|
std::string prompt;
|
||||||
@ -94,17 +96,22 @@ struct whisper_params {
|
|||||||
grammar_parser::parse_state grammar_parsed;
|
grammar_parser::parse_state grammar_parsed;
|
||||||
};
|
};
|
||||||
|
|
||||||
void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
|
static void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
|
||||||
|
|
||||||
char* whisper_param_turn_lowercase(char* in){
|
static char * whisper_param_turn_lowercase(char * in){
|
||||||
int string_len = strlen(in);
|
int string_len = strlen(in);
|
||||||
for(int i = 0; i < string_len; i++){
|
for (int i = 0; i < string_len; i++){
|
||||||
*(in+i) = tolower((unsigned char)*(in+i));
|
*(in+i) = tolower((unsigned char)*(in+i));
|
||||||
}
|
}
|
||||||
return in;
|
return in;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
|
static char * requires_value_error(const std::string & arg) {
|
||||||
|
fprintf(stderr, "error: argument %s requires value\n", arg.c_str());
|
||||||
|
exit(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
|
||||||
for (int i = 1; i < argc; i++) {
|
for (int i = 1; i < argc; i++) {
|
||||||
std::string arg = argv[i];
|
std::string arg = argv[i];
|
||||||
|
|
||||||
@ -122,21 +129,23 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
|
|||||||
whisper_print_usage(argc, argv, params);
|
whisper_print_usage(argc, argv, params);
|
||||||
exit(0);
|
exit(0);
|
||||||
}
|
}
|
||||||
else if (arg == "-t" || arg == "--threads") { params.n_threads = std::stoi(argv[++i]); }
|
#define ARGV_NEXT (((i + 1) < argc) ? argv[++i] : requires_value_error(arg))
|
||||||
else if (arg == "-p" || arg == "--processors") { params.n_processors = std::stoi(argv[++i]); }
|
else if (arg == "-t" || arg == "--threads") { params.n_threads = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-ot" || arg == "--offset-t") { params.offset_t_ms = std::stoi(argv[++i]); }
|
else if (arg == "-p" || arg == "--processors") { params.n_processors = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-on" || arg == "--offset-n") { params.offset_n = std::stoi(argv[++i]); }
|
else if (arg == "-ot" || arg == "--offset-t") { params.offset_t_ms = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-d" || arg == "--duration") { params.duration_ms = std::stoi(argv[++i]); }
|
else if (arg == "-on" || arg == "--offset-n") { params.offset_n = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-mc" || arg == "--max-context") { params.max_context = std::stoi(argv[++i]); }
|
else if (arg == "-d" || arg == "--duration") { params.duration_ms = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-ml" || arg == "--max-len") { params.max_len = std::stoi(argv[++i]); }
|
else if (arg == "-mc" || arg == "--max-context") { params.max_context = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-bo" || arg == "--best-of") { params.best_of = std::stoi(argv[++i]); }
|
else if (arg == "-ml" || arg == "--max-len") { params.max_len = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-bs" || arg == "--beam-size") { params.beam_size = std::stoi(argv[++i]); }
|
else if (arg == "-bo" || arg == "--best-of") { params.best_of = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-ac" || arg == "--audio-ctx") { params.audio_ctx = std::stoi(argv[++i]); }
|
else if (arg == "-bs" || arg == "--beam-size") { params.beam_size = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-wt" || arg == "--word-thold") { params.word_thold = std::stof(argv[++i]); }
|
else if (arg == "-ac" || arg == "--audio-ctx") { params.audio_ctx = std::stoi(ARGV_NEXT); }
|
||||||
else if (arg == "-et" || arg == "--entropy-thold") { params.entropy_thold = std::stof(argv[++i]); }
|
else if (arg == "-wt" || arg == "--word-thold") { params.word_thold = std::stof(ARGV_NEXT); }
|
||||||
else if (arg == "-lpt" || arg == "--logprob-thold") { params.logprob_thold = std::stof(argv[++i]); }
|
else if (arg == "-et" || arg == "--entropy-thold") { params.entropy_thold = std::stof(ARGV_NEXT); }
|
||||||
else if (arg == "-tp" || arg == "--temperature") { params.temperature = std::stof(argv[++i]); }
|
else if (arg == "-lpt" || arg == "--logprob-thold") { params.logprob_thold = std::stof(ARGV_NEXT); }
|
||||||
else if (arg == "-tpi" || arg == "--temperature-inc") { params.temperature_inc = std::stof(argv[++i]); }
|
else if (arg == "-nth" || arg == "--no-speech-thold") { params.no_speech_thold = std::stof(ARGV_NEXT); }
|
||||||
|
else if (arg == "-tp" || arg == "--temperature") { params.temperature = std::stof(ARGV_NEXT); }
|
||||||
|
else if (arg == "-tpi" || arg == "--temperature-inc") { params.temperature_inc = std::stof(ARGV_NEXT); }
|
||||||
else if (arg == "-debug"|| arg == "--debug-mode") { params.debug_mode = true; }
|
else if (arg == "-debug"|| arg == "--debug-mode") { params.debug_mode = true; }
|
||||||
else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
|
else if (arg == "-tr" || arg == "--translate") { params.translate = true; }
|
||||||
else if (arg == "-di" || arg == "--diarize") { params.diarize = true; }
|
else if (arg == "-di" || arg == "--diarize") { params.diarize = true; }
|
||||||
@ -148,30 +157,31 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
|
|||||||
else if (arg == "-osrt" || arg == "--output-srt") { params.output_srt = true; }
|
else if (arg == "-osrt" || arg == "--output-srt") { params.output_srt = true; }
|
||||||
else if (arg == "-owts" || arg == "--output-words") { params.output_wts = true; }
|
else if (arg == "-owts" || arg == "--output-words") { params.output_wts = true; }
|
||||||
else if (arg == "-olrc" || arg == "--output-lrc") { params.output_lrc = true; }
|
else if (arg == "-olrc" || arg == "--output-lrc") { params.output_lrc = true; }
|
||||||
else if (arg == "-fp" || arg == "--font-path") { params.font_path = argv[++i]; }
|
else if (arg == "-fp" || arg == "--font-path") { params.font_path = ARGV_NEXT; }
|
||||||
else if (arg == "-ocsv" || arg == "--output-csv") { params.output_csv = true; }
|
else if (arg == "-ocsv" || arg == "--output-csv") { params.output_csv = true; }
|
||||||
else if (arg == "-oj" || arg == "--output-json") { params.output_jsn = true; }
|
else if (arg == "-oj" || arg == "--output-json") { params.output_jsn = true; }
|
||||||
else if (arg == "-ojf" || arg == "--output-json-full"){ params.output_jsn_full = params.output_jsn = true; }
|
else if (arg == "-ojf" || arg == "--output-json-full"){ params.output_jsn_full = params.output_jsn = true; }
|
||||||
else if (arg == "-of" || arg == "--output-file") { params.fname_out.emplace_back(argv[++i]); }
|
else if (arg == "-of" || arg == "--output-file") { params.fname_out.emplace_back(ARGV_NEXT); }
|
||||||
else if (arg == "-np" || arg == "--no-prints") { params.no_prints = true; }
|
else if (arg == "-np" || arg == "--no-prints") { params.no_prints = true; }
|
||||||
else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; }
|
else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; }
|
||||||
else if (arg == "-pc" || arg == "--print-colors") { params.print_colors = true; }
|
else if (arg == "-pc" || arg == "--print-colors") { params.print_colors = true; }
|
||||||
else if (arg == "-pp" || arg == "--print-progress") { params.print_progress = true; }
|
else if (arg == "-pp" || arg == "--print-progress") { params.print_progress = true; }
|
||||||
else if (arg == "-nt" || arg == "--no-timestamps") { params.no_timestamps = true; }
|
else if (arg == "-nt" || arg == "--no-timestamps") { params.no_timestamps = true; }
|
||||||
else if (arg == "-l" || arg == "--language") { params.language = whisper_param_turn_lowercase(argv[++i]); }
|
else if (arg == "-l" || arg == "--language") { params.language = whisper_param_turn_lowercase(ARGV_NEXT); }
|
||||||
else if (arg == "-dl" || arg == "--detect-language") { params.detect_language = true; }
|
else if (arg == "-dl" || arg == "--detect-language") { params.detect_language = true; }
|
||||||
else if ( arg == "--prompt") { params.prompt = argv[++i]; }
|
else if ( arg == "--prompt") { params.prompt = ARGV_NEXT; }
|
||||||
else if (arg == "-m" || arg == "--model") { params.model = argv[++i]; }
|
else if (arg == "-m" || arg == "--model") { params.model = ARGV_NEXT; }
|
||||||
else if (arg == "-f" || arg == "--file") { params.fname_inp.emplace_back(argv[++i]); }
|
else if (arg == "-f" || arg == "--file") { params.fname_inp.emplace_back(ARGV_NEXT); }
|
||||||
else if (arg == "-oved" || arg == "--ov-e-device") { params.openvino_encode_device = argv[++i]; }
|
else if (arg == "-oved" || arg == "--ov-e-device") { params.openvino_encode_device = ARGV_NEXT; }
|
||||||
else if (arg == "-dtw" || arg == "--dtw") { params.dtw = argv[++i]; }
|
else if (arg == "-dtw" || arg == "--dtw") { params.dtw = ARGV_NEXT; }
|
||||||
else if (arg == "-ls" || arg == "--log-score") { params.log_score = true; }
|
else if (arg == "-ls" || arg == "--log-score") { params.log_score = true; }
|
||||||
else if (arg == "-ng" || arg == "--no-gpu") { params.use_gpu = false; }
|
else if (arg == "-ng" || arg == "--no-gpu") { params.use_gpu = false; }
|
||||||
else if (arg == "-fa" || arg == "--flash-attn") { params.flash_attn = true; }
|
else if (arg == "-fa" || arg == "--flash-attn") { params.flash_attn = true; }
|
||||||
else if ( arg == "--suppress-regex") { params.suppress_regex = argv[++i]; }
|
else if (arg == "-sns" || arg == "--suppress-nst") { params.suppress_nst = true; }
|
||||||
else if ( arg == "--grammar") { params.grammar = argv[++i]; }
|
else if ( arg == "--suppress-regex") { params.suppress_regex = ARGV_NEXT; }
|
||||||
else if ( arg == "--grammar-rule") { params.grammar_rule = argv[++i]; }
|
else if ( arg == "--grammar") { params.grammar = ARGV_NEXT; }
|
||||||
else if ( arg == "--grammar-penalty") { params.grammar_penalty = std::stof(argv[++i]); }
|
else if ( arg == "--grammar-rule") { params.grammar_rule = ARGV_NEXT; }
|
||||||
|
else if ( arg == "--grammar-penalty") { params.grammar_penalty = std::stof(ARGV_NEXT); }
|
||||||
else {
|
else {
|
||||||
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
|
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
|
||||||
whisper_print_usage(argc, argv, params);
|
whisper_print_usage(argc, argv, params);
|
||||||
@ -182,7 +192,7 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & params) {
|
static void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & params) {
|
||||||
fprintf(stderr, "\n");
|
fprintf(stderr, "\n");
|
||||||
fprintf(stderr, "usage: %s [options] file0.wav file1.wav ...\n", argv[0]);
|
fprintf(stderr, "usage: %s [options] file0.wav file1.wav ...\n", argv[0]);
|
||||||
fprintf(stderr, "\n");
|
fprintf(stderr, "\n");
|
||||||
@ -202,6 +212,7 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
|
|||||||
fprintf(stderr, " -wt N, --word-thold N [%-7.2f] word timestamp probability threshold\n", params.word_thold);
|
fprintf(stderr, " -wt N, --word-thold N [%-7.2f] word timestamp probability threshold\n", params.word_thold);
|
||||||
fprintf(stderr, " -et N, --entropy-thold N [%-7.2f] entropy threshold for decoder fail\n", params.entropy_thold);
|
fprintf(stderr, " -et N, --entropy-thold N [%-7.2f] entropy threshold for decoder fail\n", params.entropy_thold);
|
||||||
fprintf(stderr, " -lpt N, --logprob-thold N [%-7.2f] log probability threshold for decoder fail\n", params.logprob_thold);
|
fprintf(stderr, " -lpt N, --logprob-thold N [%-7.2f] log probability threshold for decoder fail\n", params.logprob_thold);
|
||||||
|
fprintf(stderr, " -nth N, --no-speech-thold N [%-7.2f] no speech threshold\n", params.no_speech_thold);
|
||||||
fprintf(stderr, " -tp, --temperature N [%-7.2f] The sampling temperature, between 0 and 1\n", params.temperature);
|
fprintf(stderr, " -tp, --temperature N [%-7.2f] The sampling temperature, between 0 and 1\n", params.temperature);
|
||||||
fprintf(stderr, " -tpi, --temperature-inc N [%-7.2f] The increment of temperature, between 0 and 1\n",params.temperature_inc);
|
fprintf(stderr, " -tpi, --temperature-inc N [%-7.2f] The increment of temperature, between 0 and 1\n",params.temperature_inc);
|
||||||
fprintf(stderr, " -debug, --debug-mode [%-7s] enable debug mode (eg. dump log_mel)\n", params.debug_mode ? "true" : "false");
|
fprintf(stderr, " -debug, --debug-mode [%-7s] enable debug mode (eg. dump log_mel)\n", params.debug_mode ? "true" : "false");
|
||||||
@ -234,6 +245,7 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
|
|||||||
fprintf(stderr, " -ls, --log-score [%-7s] log best decoder scores of tokens\n", params.log_score?"true":"false");
|
fprintf(stderr, " -ls, --log-score [%-7s] log best decoder scores of tokens\n", params.log_score?"true":"false");
|
||||||
fprintf(stderr, " -ng, --no-gpu [%-7s] disable GPU\n", params.use_gpu ? "false" : "true");
|
fprintf(stderr, " -ng, --no-gpu [%-7s] disable GPU\n", params.use_gpu ? "false" : "true");
|
||||||
fprintf(stderr, " -fa, --flash-attn [%-7s] flash attention\n", params.flash_attn ? "true" : "false");
|
fprintf(stderr, " -fa, --flash-attn [%-7s] flash attention\n", params.flash_attn ? "true" : "false");
|
||||||
|
fprintf(stderr, " -sns, --suppress-nst [%-7s] suppress non-speech tokens\n", params.suppress_nst ? "true" : "false");
|
||||||
fprintf(stderr, " --suppress-regex REGEX [%-7s] regular expression matching tokens to suppress\n", params.suppress_regex.c_str());
|
fprintf(stderr, " --suppress-regex REGEX [%-7s] regular expression matching tokens to suppress\n", params.suppress_regex.c_str());
|
||||||
fprintf(stderr, " --grammar GRAMMAR [%-7s] GBNF grammar to guide decoding\n", params.grammar.c_str());
|
fprintf(stderr, " --grammar GRAMMAR [%-7s] GBNF grammar to guide decoding\n", params.grammar.c_str());
|
||||||
fprintf(stderr, " --grammar-rule RULE [%-7s] top-level GBNF grammar rule name\n", params.grammar_rule.c_str());
|
fprintf(stderr, " --grammar-rule RULE [%-7s] top-level GBNF grammar rule name\n", params.grammar_rule.c_str());
|
||||||
@ -248,7 +260,7 @@ struct whisper_print_user_data {
|
|||||||
int progress_prev;
|
int progress_prev;
|
||||||
};
|
};
|
||||||
|
|
||||||
std::string estimate_diarization_speaker(std::vector<std::vector<float>> pcmf32s, int64_t t0, int64_t t1, bool id_only = false) {
|
static std::string estimate_diarization_speaker(std::vector<std::vector<float>> pcmf32s, int64_t t0, int64_t t1, bool id_only = false) {
|
||||||
std::string speaker = "";
|
std::string speaker = "";
|
||||||
const int64_t n_samples = pcmf32s[0].size();
|
const int64_t n_samples = pcmf32s[0].size();
|
||||||
|
|
||||||
@ -280,7 +292,8 @@ std::string estimate_diarization_speaker(std::vector<std::vector<float>> pcmf32s
|
|||||||
|
|
||||||
return speaker;
|
return speaker;
|
||||||
}
|
}
|
||||||
void whisper_print_progress_callback(struct whisper_context * /*ctx*/, struct whisper_state * /*state*/, int progress, void * user_data) {
|
|
||||||
|
static void whisper_print_progress_callback(struct whisper_context * /*ctx*/, struct whisper_state * /*state*/, int progress, void * user_data) {
|
||||||
int progress_step = ((whisper_print_user_data *) user_data)->params->progress_step;
|
int progress_step = ((whisper_print_user_data *) user_data)->params->progress_step;
|
||||||
int * progress_prev = &(((whisper_print_user_data *) user_data)->progress_prev);
|
int * progress_prev = &(((whisper_print_user_data *) user_data)->progress_prev);
|
||||||
if (progress >= *progress_prev + progress_step) {
|
if (progress >= *progress_prev + progress_step) {
|
||||||
@ -289,7 +302,7 @@ void whisper_print_progress_callback(struct whisper_context * /*ctx*/, struct wh
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void whisper_print_segment_callback(struct whisper_context * ctx, struct whisper_state * /*state*/, int n_new, void * user_data) {
|
static void whisper_print_segment_callback(struct whisper_context * ctx, struct whisper_state * /*state*/, int n_new, void * user_data) {
|
||||||
const auto & params = *((whisper_print_user_data *) user_data)->params;
|
const auto & params = *((whisper_print_user_data *) user_data)->params;
|
||||||
const auto & pcmf32s = *((whisper_print_user_data *) user_data)->pcmf32s;
|
const auto & pcmf32s = *((whisper_print_user_data *) user_data)->pcmf32s;
|
||||||
|
|
||||||
@ -358,7 +371,7 @@ void whisper_print_segment_callback(struct whisper_context * ctx, struct whisper
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bool output_txt(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
|
static bool output_txt(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
|
||||||
std::ofstream fout(fname);
|
std::ofstream fout(fname);
|
||||||
if (!fout.is_open()) {
|
if (!fout.is_open()) {
|
||||||
fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname);
|
fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname);
|
||||||
@ -385,7 +398,7 @@ bool output_txt(struct whisper_context * ctx, const char * fname, const whisper_
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool output_vtt(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
|
static bool output_vtt(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
|
||||||
std::ofstream fout(fname);
|
std::ofstream fout(fname);
|
||||||
if (!fout.is_open()) {
|
if (!fout.is_open()) {
|
||||||
fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname);
|
fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname);
|
||||||
@ -417,7 +430,7 @@ bool output_vtt(struct whisper_context * ctx, const char * fname, const whisper_
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool output_srt(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
|
static bool output_srt(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
|
||||||
std::ofstream fout(fname);
|
std::ofstream fout(fname);
|
||||||
if (!fout.is_open()) {
|
if (!fout.is_open()) {
|
||||||
fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname);
|
fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname);
|
||||||
@ -446,7 +459,7 @@ bool output_srt(struct whisper_context * ctx, const char * fname, const whisper_
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
char *escape_double_quotes_and_backslashes(const char *str) {
|
static char * escape_double_quotes_and_backslashes(const char * str) {
|
||||||
if (str == NULL) {
|
if (str == NULL) {
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
@ -459,7 +472,7 @@ char *escape_double_quotes_and_backslashes(const char *str) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
char *escaped = (char *)calloc(escaped_length, 1); // pre-zeroed
|
char * escaped = (char *)calloc(escaped_length, 1); // pre-zeroed
|
||||||
if (escaped == NULL) {
|
if (escaped == NULL) {
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
@ -478,7 +491,7 @@ char *escape_double_quotes_and_backslashes(const char *str) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// double quote should be escaped by another double quote. (rfc4180)
|
// double quote should be escaped by another double quote. (rfc4180)
|
||||||
char *escape_double_quotes_in_csv(const char *str) {
|
static char * escape_double_quotes_in_csv(const char * str) {
|
||||||
if (str == NULL) {
|
if (str == NULL) {
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
@ -509,7 +522,7 @@ char *escape_double_quotes_in_csv(const char *str) {
|
|||||||
return escaped;
|
return escaped;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool output_csv(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
|
static bool output_csv(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
|
||||||
std::ofstream fout(fname);
|
std::ofstream fout(fname);
|
||||||
if (!fout.is_open()) {
|
if (!fout.is_open()) {
|
||||||
fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname);
|
fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname);
|
||||||
@ -544,7 +557,7 @@ bool output_csv(struct whisper_context * ctx, const char * fname, const whisper_
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool output_score(struct whisper_context * ctx, const char * fname, const whisper_params & /*params*/, std::vector<std::vector<float>> /*pcmf32s*/) {
|
static bool output_score(struct whisper_context * ctx, const char * fname, const whisper_params & /*params*/, std::vector<std::vector<float>> /*pcmf32s*/) {
|
||||||
std::ofstream fout(fname);
|
std::ofstream fout(fname);
|
||||||
fprintf(stderr, "%s: saving output to '%s'\n", __func__, fname);
|
fprintf(stderr, "%s: saving output to '%s'\n", __func__, fname);
|
||||||
|
|
||||||
@ -563,7 +576,7 @@ bool output_score(struct whisper_context * ctx, const char * fname, const whispe
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool output_json(
|
static bool output_json(
|
||||||
struct whisper_context * ctx,
|
struct whisper_context * ctx,
|
||||||
const char * fname,
|
const char * fname,
|
||||||
const whisper_params & params,
|
const whisper_params & params,
|
||||||
@ -734,7 +747,7 @@ bool output_json(
|
|||||||
// karaoke video generation
|
// karaoke video generation
|
||||||
// outputs a bash script that uses ffmpeg to generate a video with the subtitles
|
// outputs a bash script that uses ffmpeg to generate a video with the subtitles
|
||||||
// TODO: font parameter adjustments
|
// TODO: font parameter adjustments
|
||||||
bool output_wts(struct whisper_context * ctx, const char * fname, const char * fname_inp, const whisper_params & params, float t_sec, std::vector<std::vector<float>> pcmf32s) {
|
static bool output_wts(struct whisper_context * ctx, const char * fname, const char * fname_inp, const whisper_params & params, float t_sec, std::vector<std::vector<float>> pcmf32s) {
|
||||||
std::ofstream fout(fname);
|
std::ofstream fout(fname);
|
||||||
|
|
||||||
fprintf(stderr, "%s: saving output to '%s'\n", __func__, fname);
|
fprintf(stderr, "%s: saving output to '%s'\n", __func__, fname);
|
||||||
@ -859,7 +872,7 @@ bool output_wts(struct whisper_context * ctx, const char * fname, const char * f
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool output_lrc(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
|
static bool output_lrc(struct whisper_context * ctx, const char * fname, const whisper_params & params, std::vector<std::vector<float>> pcmf32s) {
|
||||||
std::ofstream fout(fname);
|
std::ofstream fout(fname);
|
||||||
if (!fout.is_open()) {
|
if (!fout.is_open()) {
|
||||||
fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname);
|
fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname);
|
||||||
@ -900,7 +913,7 @@ bool output_lrc(struct whisper_context * ctx, const char * fname, const whisper_
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void cb_log_disable(enum ggml_log_level , const char * , void * ) { }
|
static void cb_log_disable(enum ggml_log_level , const char * , void * ) { }
|
||||||
|
|
||||||
int main(int argc, char ** argv) {
|
int main(int argc, char ** argv) {
|
||||||
whisper_params params;
|
whisper_params params;
|
||||||
@ -996,6 +1009,7 @@ int main(int argc, char ** argv) {
|
|||||||
if (params.dtw == "large.v1") cparams.dtw_aheads_preset = WHISPER_AHEADS_LARGE_V1;
|
if (params.dtw == "large.v1") cparams.dtw_aheads_preset = WHISPER_AHEADS_LARGE_V1;
|
||||||
if (params.dtw == "large.v2") cparams.dtw_aheads_preset = WHISPER_AHEADS_LARGE_V2;
|
if (params.dtw == "large.v2") cparams.dtw_aheads_preset = WHISPER_AHEADS_LARGE_V2;
|
||||||
if (params.dtw == "large.v3") cparams.dtw_aheads_preset = WHISPER_AHEADS_LARGE_V3;
|
if (params.dtw == "large.v3") cparams.dtw_aheads_preset = WHISPER_AHEADS_LARGE_V3;
|
||||||
|
if (params.dtw == "large.v3.turbo") cparams.dtw_aheads_preset = WHISPER_AHEADS_LARGE_V3_TURBO;
|
||||||
|
|
||||||
if (cparams.dtw_aheads_preset == WHISPER_AHEADS_NONE) {
|
if (cparams.dtw_aheads_preset == WHISPER_AHEADS_NONE) {
|
||||||
fprintf(stderr, "error: unknown DTW preset '%s'\n", params.dtw.c_str());
|
fprintf(stderr, "error: unknown DTW preset '%s'\n", params.dtw.c_str());
|
||||||
@ -1119,9 +1133,12 @@ int main(int argc, char ** argv) {
|
|||||||
|
|
||||||
wparams.entropy_thold = params.entropy_thold;
|
wparams.entropy_thold = params.entropy_thold;
|
||||||
wparams.logprob_thold = params.logprob_thold;
|
wparams.logprob_thold = params.logprob_thold;
|
||||||
|
wparams.no_speech_thold = params.no_speech_thold;
|
||||||
|
|
||||||
wparams.no_timestamps = params.no_timestamps;
|
wparams.no_timestamps = params.no_timestamps;
|
||||||
|
|
||||||
|
wparams.suppress_nst = params.suppress_nst;
|
||||||
|
|
||||||
whisper_print_user_data user_data = { ¶ms, &pcmf32s, 0 };
|
whisper_print_user_data user_data = { ¶ms, &pcmf32s, 0 };
|
||||||
|
|
||||||
const auto & grammar_parsed = params.grammar_parsed;
|
const auto & grammar_parsed = params.grammar_parsed;
|
@ -1,9 +1,10 @@
|
|||||||
if (WHISPER_SDL2)
|
if (WHISPER_SDL2)
|
||||||
# command
|
set(TARGET whisper-command)
|
||||||
set(TARGET command)
|
|
||||||
add_executable(${TARGET} command.cpp)
|
add_executable(${TARGET} command.cpp)
|
||||||
|
|
||||||
include(DefaultTargetOptions)
|
include(DefaultTargetOptions)
|
||||||
|
|
||||||
target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${CMAKE_THREAD_LIBS_INIT})
|
target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${CMAKE_THREAD_LIBS_INIT})
|
||||||
|
|
||||||
|
install(TARGETS ${TARGET} RUNTIME)
|
||||||
endif ()
|
endif ()
|
||||||
|
@ -1,14 +1,14 @@
|
|||||||
# command
|
# whisper.cpp/examples/command
|
||||||
|
|
||||||
This is a basic Voice Assistant example that accepts voice commands from the microphone.
|
This is a basic Voice Assistant example that accepts voice commands from the microphone.
|
||||||
More info is available in [issue #171](https://github.com/ggerganov/whisper.cpp/issues/171).
|
More info is available in [issue #171](https://github.com/ggerganov/whisper.cpp/issues/171).
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Run with default arguments and small model
|
# Run with default arguments and small model
|
||||||
./command -m ./models/ggml-small.en.bin -t 8
|
./whisper-command -m ./models/ggml-small.en.bin -t 8
|
||||||
|
|
||||||
# On Raspberry Pi, use tiny or base models + "-ac 768" for better performance
|
# On Raspberry Pi, use tiny or base models + "-ac 768" for better performance
|
||||||
./command -m ./models/ggml-tiny.en.bin -ac 768 -t 3 -c 0
|
./whisper-command -m ./models/ggml-tiny.en.bin -ac 768 -t 3 -c 0
|
||||||
```
|
```
|
||||||
|
|
||||||
https://user-images.githubusercontent.com/1991296/204038393-2f846eae-c255-4099-a76d-5735c25c49da.mp4
|
https://user-images.githubusercontent.com/1991296/204038393-2f846eae-c255-4099-a76d-5735c25c49da.mp4
|
||||||
@ -23,10 +23,10 @@ Initial tests show that this approach might be extremely efficient in terms of p
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Run in guided mode, the list of allowed commands is in commands.txt
|
# Run in guided mode, the list of allowed commands is in commands.txt
|
||||||
./command -m ./models/ggml-base.en.bin -cmd ./examples/command/commands.txt
|
./whisper-command -m ./models/ggml-base.en.bin -cmd ./examples/command/commands.txt
|
||||||
|
|
||||||
# On Raspberry Pi, in guided mode you can use "-ac 128" for extra performance
|
# On Raspberry Pi, in guided mode you can use "-ac 128" for extra performance
|
||||||
./command -m ./models/ggml-tiny.en.bin -cmd ./examples/command/commands.txt -ac 128 -t 3 -c 0
|
./whisper-command -m ./models/ggml-tiny.en.bin -cmd ./examples/command/commands.txt -ac 128 -t 3 -c 0
|
||||||
```
|
```
|
||||||
|
|
||||||
https://user-images.githubusercontent.com/1991296/207435352-8fc4ed3f-bde5-4555-9b8b-aeeb76bee969.mp4
|
https://user-images.githubusercontent.com/1991296/207435352-8fc4ed3f-bde5-4555-9b8b-aeeb76bee969.mp4
|
||||||
@ -34,7 +34,7 @@ https://user-images.githubusercontent.com/1991296/207435352-8fc4ed3f-bde5-4555-9
|
|||||||
|
|
||||||
## Building
|
## Building
|
||||||
|
|
||||||
The `command` tool depends on SDL2 library to capture audio from the microphone. You can build it like this:
|
The `whisper-command` tool depends on SDL2 library to capture audio from the microphone. You can build it like this:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Install SDL2
|
# Install SDL2
|
||||||
@ -47,5 +47,6 @@ sudo dnf install SDL2 SDL2-devel
|
|||||||
# Install SDL2 on Mac OS
|
# Install SDL2 on Mac OS
|
||||||
brew install sdl2
|
brew install sdl2
|
||||||
|
|
||||||
make command
|
cmake -B build -DWHISPER_SDL2=ON
|
||||||
|
cmake --build build --config Release
|
||||||
```
|
```
|
||||||
|
@ -59,7 +59,7 @@ struct whisper_params {
|
|||||||
|
|
||||||
void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
|
void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
|
||||||
|
|
||||||
bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
|
static bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
|
||||||
for (int i = 1; i < argc; i++) {
|
for (int i = 1; i < argc; i++) {
|
||||||
std::string arg = argv[i];
|
std::string arg = argv[i];
|
||||||
|
|
||||||
@ -130,7 +130,7 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
|
|||||||
fprintf(stderr, "\n");
|
fprintf(stderr, "\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string transcribe(
|
static std::string transcribe(
|
||||||
whisper_context * ctx,
|
whisper_context * ctx,
|
||||||
const whisper_params & params,
|
const whisper_params & params,
|
||||||
const std::vector<float> & pcmf32,
|
const std::vector<float> & pcmf32,
|
||||||
@ -216,7 +216,7 @@ std::string transcribe(
|
|||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<std::string> read_allowed_commands(const std::string & fname) {
|
static std::vector<std::string> read_allowed_commands(const std::string & fname) {
|
||||||
std::vector<std::string> allowed_commands;
|
std::vector<std::string> allowed_commands;
|
||||||
|
|
||||||
std::ifstream ifs(fname);
|
std::ifstream ifs(fname);
|
||||||
@ -238,7 +238,7 @@ std::vector<std::string> read_allowed_commands(const std::string & fname) {
|
|||||||
return allowed_commands;
|
return allowed_commands;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<std::string> get_words(const std::string &txt) {
|
static std::vector<std::string> get_words(const std::string &txt) {
|
||||||
std::vector<std::string> words;
|
std::vector<std::string> words;
|
||||||
|
|
||||||
std::istringstream iss(txt);
|
std::istringstream iss(txt);
|
||||||
@ -252,7 +252,7 @@ std::vector<std::string> get_words(const std::string &txt) {
|
|||||||
|
|
||||||
// command-list mode
|
// command-list mode
|
||||||
// guide the transcription to match the most likely command from a provided list
|
// guide the transcription to match the most likely command from a provided list
|
||||||
int process_command_list(struct whisper_context * ctx, audio_async &audio, const whisper_params ¶ms) {
|
static int process_command_list(struct whisper_context * ctx, audio_async &audio, const whisper_params ¶ms) {
|
||||||
fprintf(stderr, "\n");
|
fprintf(stderr, "\n");
|
||||||
fprintf(stderr, "%s: guided mode\n", __func__);
|
fprintf(stderr, "%s: guided mode\n", __func__);
|
||||||
|
|
||||||
@ -463,7 +463,7 @@ int process_command_list(struct whisper_context * ctx, audio_async &audio, const
|
|||||||
|
|
||||||
// always-prompt mode
|
// always-prompt mode
|
||||||
// transcribe the voice into text after valid prompt
|
// transcribe the voice into text after valid prompt
|
||||||
int always_prompt_transcription(struct whisper_context * ctx, audio_async & audio, const whisper_params & params) {
|
static int always_prompt_transcription(struct whisper_context * ctx, audio_async & audio, const whisper_params & params) {
|
||||||
bool is_running = true;
|
bool is_running = true;
|
||||||
bool ask_prompt = true;
|
bool ask_prompt = true;
|
||||||
|
|
||||||
@ -543,7 +543,7 @@ int always_prompt_transcription(struct whisper_context * ctx, audio_async & audi
|
|||||||
|
|
||||||
// general-purpose mode
|
// general-purpose mode
|
||||||
// freely transcribe the voice into text
|
// freely transcribe the voice into text
|
||||||
int process_general_transcription(struct whisper_context * ctx, audio_async & audio, const whisper_params & params) {
|
static int process_general_transcription(struct whisper_context * ctx, audio_async & audio, const whisper_params & params) {
|
||||||
bool is_running = true;
|
bool is_running = true;
|
||||||
bool have_prompt = false;
|
bool have_prompt = false;
|
||||||
bool ask_prompt = true;
|
bool ask_prompt = true;
|
||||||
|
@ -209,6 +209,8 @@ bool ggml_common_quantize_0(
|
|||||||
case GGML_TYPE_IQ4_XS:
|
case GGML_TYPE_IQ4_XS:
|
||||||
case GGML_TYPE_IQ1_M:
|
case GGML_TYPE_IQ1_M:
|
||||||
case GGML_TYPE_BF16:
|
case GGML_TYPE_BF16:
|
||||||
|
case GGML_TYPE_TQ1_0:
|
||||||
|
case GGML_TYPE_TQ2_0:
|
||||||
case GGML_TYPE_COUNT:
|
case GGML_TYPE_COUNT:
|
||||||
{
|
{
|
||||||
fprintf(stderr, "%s: unsupported quantization type %d (%s)\n", __func__, ttype, ggml_type_name((ggml_type) ttype));
|
fprintf(stderr, "%s: unsupported quantization type %d (%s)\n", __func__, ttype, ggml_type_name((ggml_type) ttype));
|
||||||
|
@ -1,5 +1,7 @@
|
|||||||
#include "common-sdl.h"
|
#include "common-sdl.h"
|
||||||
|
|
||||||
|
#include <cstdio>
|
||||||
|
|
||||||
audio_async::audio_async(int len_ms) {
|
audio_async::audio_async(int len_ms) {
|
||||||
m_len_ms = len_ms;
|
m_len_ms = len_ms;
|
||||||
|
|
||||||
@ -219,7 +221,7 @@ bool sdl_poll_events() {
|
|||||||
case SDL_QUIT:
|
case SDL_QUIT:
|
||||||
{
|
{
|
||||||
return false;
|
return false;
|
||||||
} break;
|
}
|
||||||
default:
|
default:
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -30,7 +30,7 @@ extern bool ffmpeg_decode_audio(const std::string & ifname, std::vector<uint8_t>
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
// Function to check if the next argument exists
|
// Function to check if the next argument exists
|
||||||
std::string get_next_arg(int& i, int argc, char** argv, const std::string& flag, gpt_params& params) {
|
static std::string get_next_arg(int& i, int argc, char** argv, const std::string& flag, gpt_params& params) {
|
||||||
if (i + 1 < argc && argv[i + 1][0] != '-') {
|
if (i + 1 < argc && argv[i + 1][0] != '-') {
|
||||||
return argv[++i];
|
return argv[++i];
|
||||||
} else {
|
} else {
|
||||||
@ -147,7 +147,6 @@ std::string gpt_random_prompt(std::mt19937 & rng) {
|
|||||||
case 7: return "He";
|
case 7: return "He";
|
||||||
case 8: return "She";
|
case 8: return "She";
|
||||||
case 9: return "They";
|
case 9: return "They";
|
||||||
default: return "To";
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return "The";
|
return "The";
|
||||||
@ -346,7 +345,7 @@ std::vector<gpt_vocab::id> gpt_tokenize(const gpt_vocab & vocab, const std::stri
|
|||||||
return tokens;
|
return tokens;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<gpt_vocab::id> parse_tokens_from_string(const std::string& input, char delimiter) {
|
static std::vector<gpt_vocab::id> parse_tokens_from_string(const std::string& input, char delimiter) {
|
||||||
std::vector<gpt_vocab::id> output;
|
std::vector<gpt_vocab::id> output;
|
||||||
std::stringstream ss(input);
|
std::stringstream ss(input);
|
||||||
std::string token;
|
std::string token;
|
||||||
@ -358,7 +357,7 @@ std::vector<gpt_vocab::id> parse_tokens_from_string(const std::string& input, ch
|
|||||||
return output;
|
return output;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::map<std::string, std::vector<gpt_vocab::id>> extract_tests_from_file(const std::string & fpath_test){
|
static std::map<std::string, std::vector<gpt_vocab::id>> extract_tests_from_file(const std::string & fpath_test){
|
||||||
if (fpath_test.empty()){
|
if (fpath_test.empty()){
|
||||||
fprintf(stderr, "%s : No test file found.\n", __func__);
|
fprintf(stderr, "%s : No test file found.\n", __func__);
|
||||||
return std::map<std::string, std::vector<gpt_vocab::id>>();
|
return std::map<std::string, std::vector<gpt_vocab::id>>();
|
||||||
|
@ -9,6 +9,7 @@
|
|||||||
#include <thread>
|
#include <thread>
|
||||||
#include <ctime>
|
#include <ctime>
|
||||||
#include <fstream>
|
#include <fstream>
|
||||||
|
#include <sstream>
|
||||||
|
|
||||||
#define COMMON_SAMPLE_RATE 16000
|
#define COMMON_SAMPLE_RATE 16000
|
||||||
|
|
||||||
@ -21,7 +22,7 @@ struct gpt_params {
|
|||||||
int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
|
int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
|
||||||
int32_t n_predict = 200; // new tokens to predict
|
int32_t n_predict = 200; // new tokens to predict
|
||||||
int32_t n_parallel = 1; // number of parallel streams
|
int32_t n_parallel = 1; // number of parallel streams
|
||||||
int32_t n_batch = 8; // batch size for prompt processing
|
int32_t n_batch = 32; // batch size for prompt processing
|
||||||
int32_t n_ctx = 2048; // context size (this is the KV cache max size)
|
int32_t n_ctx = 2048; // context size (this is the KV cache max size)
|
||||||
int32_t n_gpu_layers = 0; // number of layers to offlload to the GPU
|
int32_t n_gpu_layers = 0; // number of layers to offlload to the GPU
|
||||||
|
|
||||||
@ -286,12 +287,43 @@ void sam_print_usage(int argc, char ** argv, const sam_params & params);
|
|||||||
// Terminal utils
|
// Terminal utils
|
||||||
//
|
//
|
||||||
|
|
||||||
|
#define SQR(X) ((X) * (X))
|
||||||
|
#define UNCUBE(x) x < 48 ? 0 : x < 115 ? 1 : (x - 35) / 40
|
||||||
|
|
||||||
// Terminal color map. 10 colors grouped in ranges [0.0, 0.1, ..., 0.9]
|
/**
|
||||||
// Lowest is red, middle is yellow, highest is green.
|
* Quantizes 24-bit RGB to xterm256 code range [16,256).
|
||||||
|
*/
|
||||||
|
static int rgb2xterm256(int r, int g, int b) {
|
||||||
|
unsigned char cube[] = {0, 0137, 0207, 0257, 0327, 0377};
|
||||||
|
int av, ir, ig, ib, il, qr, qg, qb, ql;
|
||||||
|
av = r * .299 + g * .587 + b * .114 + .5;
|
||||||
|
ql = (il = av > 238 ? 23 : (av - 3) / 10) * 10 + 8;
|
||||||
|
qr = cube[(ir = UNCUBE(r))];
|
||||||
|
qg = cube[(ig = UNCUBE(g))];
|
||||||
|
qb = cube[(ib = UNCUBE(b))];
|
||||||
|
if (SQR(qr - r) + SQR(qg - g) + SQR(qb - b) <=
|
||||||
|
SQR(ql - r) + SQR(ql - g) + SQR(ql - b))
|
||||||
|
return ir * 36 + ig * 6 + ib + 020;
|
||||||
|
return il + 0350;
|
||||||
|
}
|
||||||
|
|
||||||
|
static std::string set_xterm256_foreground(int r, int g, int b) {
|
||||||
|
int x = rgb2xterm256(r, g, b);
|
||||||
|
std::ostringstream oss;
|
||||||
|
oss << "\033[38;5;" << x << "m";
|
||||||
|
return oss.str();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lowest is red, middle is yellow, highest is green. Color scheme from
|
||||||
|
// Paul Tol; it is colorblind friendly https://personal.sron.nl/~pault/
|
||||||
const std::vector<std::string> k_colors = {
|
const std::vector<std::string> k_colors = {
|
||||||
"\033[38;5;196m", "\033[38;5;202m", "\033[38;5;208m", "\033[38;5;214m", "\033[38;5;220m",
|
set_xterm256_foreground(220, 5, 12),
|
||||||
"\033[38;5;226m", "\033[38;5;190m", "\033[38;5;154m", "\033[38;5;118m", "\033[38;5;82m",
|
set_xterm256_foreground(232, 96, 28),
|
||||||
|
set_xterm256_foreground(241, 147, 45),
|
||||||
|
set_xterm256_foreground(246, 193, 65),
|
||||||
|
set_xterm256_foreground(247, 240, 86),
|
||||||
|
set_xterm256_foreground(144, 201, 135),
|
||||||
|
set_xterm256_foreground( 78, 178, 101),
|
||||||
};
|
};
|
||||||
|
|
||||||
//
|
//
|
||||||
|
4
examples/deprecation-warning/CMakeLists.txt
Normal file
4
examples/deprecation-warning/CMakeLists.txt
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
add_executable(main ./deprecation-warning.cpp)
|
||||||
|
add_executable(bench ./deprecation-warning.cpp)
|
||||||
|
add_executable(stream ./deprecation-warning.cpp)
|
||||||
|
add_executable(command ./deprecation-warning.cpp)
|
17
examples/deprecation-warning/README.md
Normal file
17
examples/deprecation-warning/README.md
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
# Migration notice for binary filenames
|
||||||
|
|
||||||
|
> [!IMPORTANT]
|
||||||
|
[2024 Dec 20] Binaries have been renamed w/ a `whisper-` prefix. `main` is now `whisper-cli`, `server` is `whisper-server`, etc (https://github.com/ggerganov/whisper.cpp/pull/2648)
|
||||||
|
|
||||||
|
This migration was important, but it is a breaking change that may not always be immediately obvious to users.
|
||||||
|
|
||||||
|
Please update all scripts and workflows to use the new binary names.
|
||||||
|
|
||||||
|
| Old Filename | New Filename |
|
||||||
|
| ---- | ---- |
|
||||||
|
| main | whisper-cli |
|
||||||
|
| bench | whisper-bench |
|
||||||
|
| stream | whisper-stream |
|
||||||
|
| command | whisper-command |
|
||||||
|
| server | whisper-server |
|
||||||
|
| talk-llama | whisper-talk-llama |
|
38
examples/deprecation-warning/deprecation-warning.cpp
Normal file
38
examples/deprecation-warning/deprecation-warning.cpp
Normal file
@ -0,0 +1,38 @@
|
|||||||
|
// Warns users that this filename was deprecated, and provides a link for more information.
|
||||||
|
|
||||||
|
#include <cstdio>
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
// Main
|
||||||
|
int main(int argc, char** argv) {
|
||||||
|
std::string filename = "main";
|
||||||
|
if (argc >= 1) {
|
||||||
|
filename = argv[0];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get only the program name from the full path
|
||||||
|
size_t pos = filename.find_last_of("/\\");
|
||||||
|
if (pos != std::string::npos) {
|
||||||
|
filename = filename.substr(pos+1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Append "whisper-" to the beginning of filename to get the replacemnt filename
|
||||||
|
std::string replacement_filename = "whisper-" + filename;
|
||||||
|
|
||||||
|
// The exception is if the filename is "main", then our replacement filename is "whisper-cli"
|
||||||
|
if (filename == "main") {
|
||||||
|
replacement_filename = "whisper-cli";
|
||||||
|
}
|
||||||
|
|
||||||
|
if (filename == "main.exe") {
|
||||||
|
replacement_filename = "whisper-cli.exe";
|
||||||
|
}
|
||||||
|
|
||||||
|
fprintf(stdout, "\n");
|
||||||
|
fprintf(stdout, "WARNING: The binary '%s' is deprecated.\n", filename.c_str());
|
||||||
|
fprintf(stdout, " Please use '%s' instead.\n", replacement_filename.c_str());
|
||||||
|
fprintf(stdout, " See https://github.com/ggerganov/whisper.cpp/tree/master/examples/deprecation-warning/README.md for more information.\n");
|
||||||
|
fprintf(stdout, "\n");
|
||||||
|
|
||||||
|
return EXIT_FAILURE;
|
||||||
|
}
|
4513
examples/dr_wav.h
4513
examples/dr_wav.h
File diff suppressed because it is too large
Load Diff
@ -204,8 +204,6 @@ static int decode_audio(struct audio_buffer *audio_buf, s16 **data, int *size)
|
|||||||
const size_t errbuffsize = 1024;
|
const size_t errbuffsize = 1024;
|
||||||
char errbuff[errbuffsize];
|
char errbuff[errbuffsize];
|
||||||
|
|
||||||
av_register_all(); // from avformat. Still a must-have call for ffmpeg v3! (can be skipped for later versions)
|
|
||||||
|
|
||||||
fmt_ctx = avformat_alloc_context();
|
fmt_ctx = avformat_alloc_context();
|
||||||
avio_ctx_buffer = (u8*)av_malloc(AVIO_CTX_BUF_SZ);
|
avio_ctx_buffer = (u8*)av_malloc(AVIO_CTX_BUF_SZ);
|
||||||
LOG("Creating an avio context: AVIO_CTX_BUF_SZ=%d\n", AVIO_CTX_BUF_SZ);
|
LOG("Creating an avio context: AVIO_CTX_BUF_SZ=%d\n", AVIO_CTX_BUF_SZ);
|
||||||
@ -321,7 +319,7 @@ int ffmpeg_decode_audio(const std::string &ifname, std::vector<uint8_t>& owav_da
|
|||||||
LOG("Couldn't map input file %s\n", ifname.c_str());
|
LOG("Couldn't map input file %s\n", ifname.c_str());
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
LOG("Mapped input file: %x size: %d\n", ibuf, ibuf_size);
|
LOG("Mapped input file: %s size: %d\n", ibuf, (int) ibuf_size);
|
||||||
struct audio_buffer inaudio_buf;
|
struct audio_buffer inaudio_buf;
|
||||||
inaudio_buf.ptr = ibuf;
|
inaudio_buf.ptr = ibuf;
|
||||||
inaudio_buf.size = ibuf_size;
|
inaudio_buf.size = ibuf_size;
|
||||||
|
@ -11,7 +11,7 @@
|
|||||||
# Press Ctrl+C to stop recording
|
# Press Ctrl+C to stop recording
|
||||||
#
|
#
|
||||||
|
|
||||||
executable="./main"
|
executable="./build/bin/whisper-cli"
|
||||||
model="base.en"
|
model="base.en"
|
||||||
model_path="models/ggml-$model.bin"
|
model_path="models/ggml-$model.bin"
|
||||||
|
|
||||||
@ -46,7 +46,7 @@ ffmpeg -y -i ./rec.wav -ar 16000 -ac 1 -c:a pcm_s16le ./rec16.wav > /dev/null 2>
|
|||||||
|
|
||||||
# run Whisper
|
# run Whisper
|
||||||
echo "Processing ..."
|
echo "Processing ..."
|
||||||
./main -m models/ggml-base.en.bin rec16.wav -owts > /dev/null 2>&1
|
${executable} -m models/ggml-base.en.bin rec16.wav -owts > /dev/null 2>&1
|
||||||
|
|
||||||
# generate Karaoke video
|
# generate Karaoke video
|
||||||
echo "Generating video ..."
|
echo "Generating video ..."
|
||||||
|
@ -9,7 +9,7 @@
|
|||||||
namespace grammar_parser {
|
namespace grammar_parser {
|
||||||
// NOTE: assumes valid utf8 (but checks for overrun)
|
// NOTE: assumes valid utf8 (but checks for overrun)
|
||||||
// copied from whisper.cpp
|
// copied from whisper.cpp
|
||||||
std::pair<uint32_t, const char *> decode_utf8(const char * src) {
|
static std::pair<uint32_t, const char *> decode_utf8(const char * src) {
|
||||||
static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
|
static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
|
||||||
uint8_t first_byte = static_cast<uint8_t>(*src);
|
uint8_t first_byte = static_cast<uint8_t>(*src);
|
||||||
uint8_t highbits = first_byte >> 4;
|
uint8_t highbits = first_byte >> 4;
|
||||||
@ -24,19 +24,19 @@ namespace grammar_parser {
|
|||||||
return std::make_pair(value, pos);
|
return std::make_pair(value, pos);
|
||||||
}
|
}
|
||||||
|
|
||||||
uint32_t get_symbol_id(parse_state & state, const char * src, size_t len) {
|
static uint32_t get_symbol_id(parse_state & state, const char * src, size_t len) {
|
||||||
uint32_t next_id = static_cast<uint32_t>(state.symbol_ids.size());
|
uint32_t next_id = static_cast<uint32_t>(state.symbol_ids.size());
|
||||||
auto result = state.symbol_ids.insert(std::make_pair(std::string(src, len), next_id));
|
auto result = state.symbol_ids.insert(std::make_pair(std::string(src, len), next_id));
|
||||||
return result.first->second;
|
return result.first->second;
|
||||||
}
|
}
|
||||||
|
|
||||||
uint32_t generate_symbol_id(parse_state & state, const std::string & base_name) {
|
static uint32_t generate_symbol_id(parse_state & state, const std::string & base_name) {
|
||||||
uint32_t next_id = static_cast<uint32_t>(state.symbol_ids.size());
|
uint32_t next_id = static_cast<uint32_t>(state.symbol_ids.size());
|
||||||
state.symbol_ids[base_name + '_' + std::to_string(next_id)] = next_id;
|
state.symbol_ids[base_name + '_' + std::to_string(next_id)] = next_id;
|
||||||
return next_id;
|
return next_id;
|
||||||
}
|
}
|
||||||
|
|
||||||
void add_rule(
|
static void add_rule(
|
||||||
parse_state & state,
|
parse_state & state,
|
||||||
uint32_t rule_id,
|
uint32_t rule_id,
|
||||||
const std::vector<whisper_grammar_element> & rule) {
|
const std::vector<whisper_grammar_element> & rule) {
|
||||||
@ -46,11 +46,11 @@ namespace grammar_parser {
|
|||||||
state.rules[rule_id] = rule;
|
state.rules[rule_id] = rule;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool is_word_char(char c) {
|
static bool is_word_char(char c) {
|
||||||
return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '-' || ('0' <= c && c <= '9');
|
return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '-' || ('0' <= c && c <= '9');
|
||||||
}
|
}
|
||||||
|
|
||||||
std::pair<uint32_t, const char *> parse_hex(const char * src, int size) {
|
static std::pair<uint32_t, const char *> parse_hex(const char * src, int size) {
|
||||||
const char * pos = src;
|
const char * pos = src;
|
||||||
const char * end = src + size;
|
const char * end = src + size;
|
||||||
uint32_t value = 0;
|
uint32_t value = 0;
|
||||||
@ -73,7 +73,7 @@ namespace grammar_parser {
|
|||||||
return std::make_pair(value, pos);
|
return std::make_pair(value, pos);
|
||||||
}
|
}
|
||||||
|
|
||||||
const char * parse_space(const char * src, bool newline_ok) {
|
static const char * parse_space(const char * src, bool newline_ok) {
|
||||||
const char * pos = src;
|
const char * pos = src;
|
||||||
while (*pos == ' ' || *pos == '\t' || *pos == '#' ||
|
while (*pos == ' ' || *pos == '\t' || *pos == '#' ||
|
||||||
(newline_ok && (*pos == '\r' || *pos == '\n'))) {
|
(newline_ok && (*pos == '\r' || *pos == '\n'))) {
|
||||||
@ -88,7 +88,7 @@ namespace grammar_parser {
|
|||||||
return pos;
|
return pos;
|
||||||
}
|
}
|
||||||
|
|
||||||
const char * parse_name(const char * src) {
|
static const char * parse_name(const char * src) {
|
||||||
const char * pos = src;
|
const char * pos = src;
|
||||||
while (is_word_char(*pos)) {
|
while (is_word_char(*pos)) {
|
||||||
pos++;
|
pos++;
|
||||||
@ -99,7 +99,7 @@ namespace grammar_parser {
|
|||||||
return pos;
|
return pos;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::pair<uint32_t, const char *> parse_char(const char * src) {
|
static std::pair<uint32_t, const char *> parse_char(const char * src) {
|
||||||
if (*src == '\\') {
|
if (*src == '\\') {
|
||||||
switch (src[1]) {
|
switch (src[1]) {
|
||||||
case 'x': return parse_hex(src + 2, 2);
|
case 'x': return parse_hex(src + 2, 2);
|
||||||
@ -122,14 +122,14 @@ namespace grammar_parser {
|
|||||||
throw std::runtime_error("unexpected end of input");
|
throw std::runtime_error("unexpected end of input");
|
||||||
}
|
}
|
||||||
|
|
||||||
const char * parse_alternates(
|
static const char * parse_alternates(
|
||||||
parse_state & state,
|
parse_state & state,
|
||||||
const char * src,
|
const char * src,
|
||||||
const std::string & rule_name,
|
const std::string & rule_name,
|
||||||
uint32_t rule_id,
|
uint32_t rule_id,
|
||||||
bool is_nested);
|
bool is_nested);
|
||||||
|
|
||||||
const char * parse_sequence(
|
static const char * parse_sequence(
|
||||||
parse_state & state,
|
parse_state & state,
|
||||||
const char * src,
|
const char * src,
|
||||||
const std::string & rule_name,
|
const std::string & rule_name,
|
||||||
@ -229,7 +229,7 @@ namespace grammar_parser {
|
|||||||
return pos;
|
return pos;
|
||||||
}
|
}
|
||||||
|
|
||||||
const char * parse_alternates(
|
static const char * parse_alternates(
|
||||||
parse_state & state,
|
parse_state & state,
|
||||||
const char * src,
|
const char * src,
|
||||||
const std::string & rule_name,
|
const std::string & rule_name,
|
||||||
@ -247,7 +247,7 @@ namespace grammar_parser {
|
|||||||
return pos;
|
return pos;
|
||||||
}
|
}
|
||||||
|
|
||||||
const char * parse_rule(parse_state & state, const char * src) {
|
static const char * parse_rule(parse_state & state, const char * src) {
|
||||||
const char * name_end = parse_name(src);
|
const char * name_end = parse_name(src);
|
||||||
const char * pos = parse_space(name_end, false);
|
const char * pos = parse_space(name_end, false);
|
||||||
size_t name_len = name_end - src;
|
size_t name_len = name_end - src;
|
||||||
@ -285,7 +285,7 @@ namespace grammar_parser {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void print_grammar_char(FILE * file, uint32_t c) {
|
static void print_grammar_char(FILE * file, uint32_t c) {
|
||||||
if (0x20 <= c && c <= 0x7f) {
|
if (0x20 <= c && c <= 0x7f) {
|
||||||
fprintf(file, "%c", static_cast<char>(c));
|
fprintf(file, "%c", static_cast<char>(c));
|
||||||
} else {
|
} else {
|
||||||
@ -294,7 +294,7 @@ namespace grammar_parser {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bool is_char_element(whisper_grammar_element elem) {
|
static bool is_char_element(whisper_grammar_element elem) {
|
||||||
switch (elem.type) {
|
switch (elem.type) {
|
||||||
case WHISPER_GRETYPE_CHAR: return true;
|
case WHISPER_GRETYPE_CHAR: return true;
|
||||||
case WHISPER_GRETYPE_CHAR_NOT: return true;
|
case WHISPER_GRETYPE_CHAR_NOT: return true;
|
||||||
@ -304,7 +304,7 @@ namespace grammar_parser {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void print_rule_binary(FILE * file, const std::vector<whisper_grammar_element> & rule) {
|
static void print_rule_binary(FILE * file, const std::vector<whisper_grammar_element> & rule) {
|
||||||
for (auto elem : rule) {
|
for (auto elem : rule) {
|
||||||
switch (elem.type) {
|
switch (elem.type) {
|
||||||
case WHISPER_GRETYPE_END: fprintf(file, "END"); break;
|
case WHISPER_GRETYPE_END: fprintf(file, "END"); break;
|
||||||
@ -334,7 +334,7 @@ namespace grammar_parser {
|
|||||||
fprintf(file, "\n");
|
fprintf(file, "\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
void print_rule(
|
static void print_rule(
|
||||||
FILE * file,
|
FILE * file,
|
||||||
uint32_t rule_id,
|
uint32_t rule_id,
|
||||||
const std::vector<whisper_grammar_element> & rule,
|
const std::vector<whisper_grammar_element> & rule,
|
||||||
@ -413,7 +413,7 @@ namespace grammar_parser {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<const whisper_grammar_element *> parse_state::c_rules() const{
|
std::vector<const whisper_grammar_element *> parse_state::c_rules() const {
|
||||||
std::vector<const whisper_grammar_element *> ret;
|
std::vector<const whisper_grammar_element *> ret;
|
||||||
for (const auto & rule : rules) {
|
for (const auto & rule : rules) {
|
||||||
ret.push_back(rule.data());
|
ret.push_back(rule.data());
|
||||||
|
@ -14,7 +14,7 @@ model="base.en"
|
|||||||
|
|
||||||
check_requirements()
|
check_requirements()
|
||||||
{
|
{
|
||||||
if ! command -v ./main &>/dev/null; then
|
if ! command -v ./build/bin/whisper-cli &>/dev/null; then
|
||||||
echo "whisper.cpp main executable is required (make)"
|
echo "whisper.cpp main executable is required (make)"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
@ -48,7 +48,7 @@ if [ -n "$3" ]; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# Whisper models
|
# Whisper models
|
||||||
models=( "tiny.en" "tiny" "base.en" "base" "small.en" "small" "medium.en" "medium" "large-v1" "large-v2" "large-v3" )
|
models=( "tiny.en" "tiny" "base.en" "base" "small.en" "small" "medium.en" "medium" "large-v1" "large-v2" "large-v3" "large-v3-turbo" )
|
||||||
|
|
||||||
# list available models
|
# list available models
|
||||||
function list_models {
|
function list_models {
|
||||||
@ -100,7 +100,7 @@ while [ $running -eq 1 ]; do
|
|||||||
err=$(cat /tmp/whisper-live.err | wc -l)
|
err=$(cat /tmp/whisper-live.err | wc -l)
|
||||||
done
|
done
|
||||||
|
|
||||||
./main -t 8 -m ./models/ggml-${model}.bin -f /tmp/whisper-live.wav --no-timestamps -otxt 2> /tmp/whispererr | tail -n 1
|
./build/bin/whisper-cli -t 8 -m ./models/ggml-${model}.bin -f /tmp/whisper-live.wav --no-timestamps -otxt 2> /tmp/whispererr | tail -n 1
|
||||||
|
|
||||||
while [ $SECONDS -lt $((($i+1)*$step_s)) ]; do
|
while [ $SECONDS -lt $((($i+1)*$step_s)) ]; do
|
||||||
sleep 1
|
sleep 1
|
||||||
@ -109,4 +109,4 @@ while [ $running -eq 1 ]; do
|
|||||||
done
|
done
|
||||||
|
|
||||||
killall -v ffmpeg
|
killall -v ffmpeg
|
||||||
killall -v main
|
killall -v whisper-cli
|
||||||
|
@ -53,7 +53,7 @@ struct commandset {
|
|||||||
|
|
||||||
void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
|
void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
|
||||||
|
|
||||||
bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
|
static bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
|
||||||
for (int i = 1; i < argc; i++) {
|
for (int i = 1; i < argc; i++) {
|
||||||
std::string arg = argv[i];
|
std::string arg = argv[i];
|
||||||
|
|
||||||
@ -109,7 +109,7 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
|
|||||||
fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
|
fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str());
|
||||||
fprintf(stderr, "\n");
|
fprintf(stderr, "\n");
|
||||||
}
|
}
|
||||||
uint64_t wait_for_vad(audio_async & audio, json jparams, const whisper_params & params, uint64_t maxlength_ms, std::vector<float> & pcmf32) {
|
static uint64_t wait_for_vad(audio_async & audio, json jparams, const whisper_params & params, uint64_t maxlength_ms, std::vector<float> & pcmf32) {
|
||||||
using namespace std::chrono;
|
using namespace std::chrono;
|
||||||
uint64_t time_now = time_point_cast<milliseconds>(system_clock::now()).time_since_epoch().count();
|
uint64_t time_now = time_point_cast<milliseconds>(system_clock::now()).time_since_epoch().count();
|
||||||
uint64_t start_time = time_now;
|
uint64_t start_time = time_now;
|
||||||
@ -153,7 +153,7 @@ uint64_t wait_for_vad(audio_async & audio, json jparams, const whisper_params &
|
|||||||
return time_now;
|
return time_now;
|
||||||
}
|
}
|
||||||
|
|
||||||
json unguided_transcription(struct whisper_context * ctx, audio_async &audio, json jparams, const whisper_params ¶ms) {
|
static json unguided_transcription(struct whisper_context * ctx, audio_async &audio, json jparams, const whisper_params ¶ms) {
|
||||||
std::vector<whisper_token> prompt_tokens;
|
std::vector<whisper_token> prompt_tokens;
|
||||||
std::vector<float> pcmf32;
|
std::vector<float> pcmf32;
|
||||||
uint64_t unprocessed_audio_timestamp = wait_for_vad(audio, jparams, params, 10000U, pcmf32);
|
uint64_t unprocessed_audio_timestamp = wait_for_vad(audio, jparams, params, 10000U, pcmf32);
|
||||||
@ -181,7 +181,7 @@ json unguided_transcription(struct whisper_context * ctx, audio_async &audio, js
|
|||||||
wparams.n_threads = params.n_threads;
|
wparams.n_threads = params.n_threads;
|
||||||
|
|
||||||
wparams.audio_ctx = params.audio_ctx;
|
wparams.audio_ctx = params.audio_ctx;
|
||||||
wparams.suppress_non_speech_tokens = true;
|
wparams.suppress_nst = true;
|
||||||
// run the transformer and a single decoding pass
|
// run the transformer and a single decoding pass
|
||||||
if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
|
if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
|
||||||
fprintf(stderr, "%s: ERROR: whisper_full() failed\n", __func__);
|
fprintf(stderr, "%s: ERROR: whisper_full() failed\n", __func__);
|
||||||
@ -199,7 +199,7 @@ json unguided_transcription(struct whisper_context * ctx, audio_async &audio, js
|
|||||||
|
|
||||||
// command-list mode
|
// command-list mode
|
||||||
// guide the transcription to match the most likely command from a provided list
|
// guide the transcription to match the most likely command from a provided list
|
||||||
json guided_transcription(struct whisper_context * ctx, audio_async &audio, const whisper_params ¶ms, json jparams, std::vector<struct commandset> commandset_list) {
|
static json guided_transcription(struct whisper_context * ctx, audio_async &audio, const whisper_params ¶ms, json jparams, std::vector<struct commandset> commandset_list) {
|
||||||
struct commandset cs = commandset_list[jparams.value("commandset_index", commandset_list.size()-1)];
|
struct commandset cs = commandset_list[jparams.value("commandset_index", commandset_list.size()-1)];
|
||||||
std::vector<float> pcmf32;
|
std::vector<float> pcmf32;
|
||||||
uint64_t unprocessed_audio_timestamp = wait_for_vad(audio, jparams, params, 2000U, pcmf32);
|
uint64_t unprocessed_audio_timestamp = wait_for_vad(audio, jparams, params, 2000U, pcmf32);
|
||||||
@ -225,7 +225,7 @@ json guided_transcription(struct whisper_context * ctx, audio_async &audio, cons
|
|||||||
wparams.prompt_tokens = cs.prompt_tokens.data();
|
wparams.prompt_tokens = cs.prompt_tokens.data();
|
||||||
wparams.prompt_n_tokens = cs.prompt_tokens.size();
|
wparams.prompt_n_tokens = cs.prompt_tokens.size();
|
||||||
// TODO: properly expose as option
|
// TODO: properly expose as option
|
||||||
wparams.suppress_non_speech_tokens = true;
|
wparams.suppress_nst = true;
|
||||||
|
|
||||||
// run the transformer and a single decoding pass
|
// run the transformer and a single decoding pass
|
||||||
if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
|
if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
|
||||||
@ -285,7 +285,7 @@ json guided_transcription(struct whisper_context * ctx, audio_async &audio, cons
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
json register_commandset(struct whisper_context * ctx, json jparams, std::vector<struct commandset> &commandset_list) {
|
static json register_commandset(struct whisper_context * ctx, json jparams, std::vector<struct commandset> &commandset_list) {
|
||||||
// TODO: check for token collision
|
// TODO: check for token collision
|
||||||
struct commandset cs;
|
struct commandset cs;
|
||||||
|
|
||||||
@ -325,7 +325,8 @@ json register_commandset(struct whisper_context * ctx, json jparams, std::vector
|
|||||||
commandset_list.push_back(cs);
|
commandset_list.push_back(cs);
|
||||||
return json{{"index",index}};
|
return json{{"index",index}};
|
||||||
}
|
}
|
||||||
json seek(struct whisper_context * /*ctx*/, audio_async & /*audio*/, json /*params*/) {
|
|
||||||
|
static json seek(struct whisper_context * /*ctx*/, audio_async & /*audio*/, json /*params*/) {
|
||||||
// whisper_state has the pertinent offsets, but there also seem to be a large
|
// whisper_state has the pertinent offsets, but there also seem to be a large
|
||||||
// number of scratch buffers that would prevent rewinding context in a manner similar to llama
|
// number of scratch buffers that would prevent rewinding context in a manner similar to llama
|
||||||
// I'll give this a another pass once everything else is implemented,
|
// I'll give this a another pass once everything else is implemented,
|
||||||
@ -335,7 +336,8 @@ json seek(struct whisper_context * /*ctx*/, audio_async & /*audio*/, json /*para
|
|||||||
{"message", "Seeking is not yet supported."}
|
{"message", "Seeking is not yet supported."}
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
json parse_job(const json &body, struct whisper_context * ctx, audio_async &audio, const whisper_params ¶ms, std::vector<struct commandset> &commandset_list) {
|
|
||||||
|
static json parse_job(const json &body, struct whisper_context * ctx, audio_async &audio, const whisper_params ¶ms, std::vector<struct commandset> &commandset_list) {
|
||||||
// See: https://www.jsonrpc.org/specification
|
// See: https://www.jsonrpc.org/specification
|
||||||
json id = body.at("id");
|
json id = body.at("id");
|
||||||
try {
|
try {
|
||||||
@ -375,7 +377,7 @@ json parse_job(const json &body, struct whisper_context * ctx, audio_async &audi
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void process_loop(struct whisper_context * ctx, audio_async &audio, const whisper_params ¶ms) {
|
static void process_loop(struct whisper_context * ctx, audio_async &audio, const whisper_params ¶ms) {
|
||||||
std::deque<json> jobqueue;
|
std::deque<json> jobqueue;
|
||||||
std::vector<struct commandset> commandset_list;
|
std::vector<struct commandset> commandset_list;
|
||||||
while (true) {
|
while (true) {
|
||||||
|
@ -21,7 +21,7 @@ def process_audio(wav_file, model_name="base.en"):
|
|||||||
if not os.path.exists(wav_file):
|
if not os.path.exists(wav_file):
|
||||||
raise FileNotFoundError(f"WAV file not found: {wav_file}")
|
raise FileNotFoundError(f"WAV file not found: {wav_file}")
|
||||||
|
|
||||||
full_command = f"./main -m {model} -f {wav_file} -np -nt"
|
full_command = f"./main -m {model} -f {wav_file} -nt"
|
||||||
|
|
||||||
# Execute the command
|
# Execute the command
|
||||||
process = subprocess.Popen(full_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
process = subprocess.Popen(full_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
|
@ -36,7 +36,7 @@ struct whisper_filters {
|
|||||||
};
|
};
|
||||||
|
|
||||||
// quantize a model
|
// quantize a model
|
||||||
bool whisper_model_quantize(const std::string & fname_inp, const std::string & fname_out, ggml_ftype ftype) {
|
static bool whisper_model_quantize(const std::string & fname_inp, const std::string & fname_out, ggml_ftype ftype) {
|
||||||
gpt_vocab vocab;
|
gpt_vocab vocab;
|
||||||
|
|
||||||
printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str());
|
printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str());
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
set(TARGET server)
|
set(TARGET whisper-server)
|
||||||
add_executable(${TARGET} server.cpp httplib.h)
|
add_executable(${TARGET} server.cpp httplib.h)
|
||||||
|
|
||||||
include(DefaultTargetOptions)
|
include(DefaultTargetOptions)
|
||||||
@ -8,3 +8,5 @@ target_link_libraries(${TARGET} PRIVATE common json_cpp whisper ${CMAKE_THREAD_L
|
|||||||
if (WIN32)
|
if (WIN32)
|
||||||
target_link_libraries(${TARGET} PRIVATE ws2_32)
|
target_link_libraries(${TARGET} PRIVATE ws2_32)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
install(TARGETS ${TARGET} RUNTIME)
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user