Mirror of https://github.com/ggerganov/whisper.cpp.git (synced 2025-07-08 14:16:53 +02:00)

Compare commits

3 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 9c61f5f585 | |
| | c94c469592 | |
| | feac80dd3f | |
CMakeLists.txt
@@ -1,6 +1,6 @@
 cmake_minimum_required (VERSION 3.0)
 
-project(whisper.cpp VERSION 1.4.0)
+project(whisper.cpp VERSION 1.4.1)
 
 if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
     add_compile_options(/utf-8)
README.md
@@ -6,7 +6,7 @@
 [](https://opensource.org/licenses/MIT)
 [](https://www.npmjs.com/package/whisper.cpp/)
 
-Beta: [v1.4.0](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.4.0) / Stable: [v1.2.1](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.2.1) / [Roadmap | F.A.Q.](https://github.com/ggerganov/whisper.cpp/discussions/126)
+Beta: [v1.4.1](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.4.1) / Stable: [v1.2.1](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.2.1) / [Roadmap | F.A.Q.](https://github.com/ggerganov/whisper.cpp/discussions/126)
 
 High-performance inference of [OpenAI's Whisper](https://github.com/openai/whisper) automatic speech recognition (ASR) model:
 
Submodule bindings/ios updated: 30edc4c500...af745e4f2f
package.json
@@ -1,6 +1,6 @@
 {
   "name": "whisper.cpp",
-  "version": "1.4.0",
+  "version": "1.4.1",
   "description": "Whisper speech recognition",
   "main": "whisper.js",
   "scripts": {
examples/common-ggml.cpp
@@ -90,7 +90,7 @@ bool ggml_common_quantize_0(
         }
 
         int32_t nelements = 1;
-        int32_t ne[2] = { 1, 1 };
+        int32_t ne[4] = { 1, 1, 1, 1 };
         for (int i = 0; i < n_dims; ++i) {
             finp.read (reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
             nelements *= ne[i];
@@ -99,7 +99,7 @@ bool ggml_common_quantize_0(
         std::string name(length, 0);
         finp.read (&name[0], length);
 
-        printf("%64s - [%5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ggml_type_name((ggml_type) ttype));
+        printf("%64s - [%5d, %5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ne[2], ggml_type_name((ggml_type) ttype));
 
         bool quantize = false;
 
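Both hunks above follow from one change: the per-tensor dimension array is widened from 2 to 4 entries (ggml tensors have at most 4 dimensions), and the log line now prints a third dimension. A minimal sketch of the same header-reading pattern, with hypothetical names and the same `{ 1, 1, 1, 1 }` initialization as the patched code:

```cpp
#include <cstdint>
#include <fstream>

// Sketch only: read n_dims 32-bit extents into an array sized for ggml's
// maximum tensor rank (4); unused trailing dimensions stay at 1, so the
// element count is unaffected. Function and variable names are illustrative.
static bool read_tensor_dims(std::ifstream & finp, int32_t n_dims,
                             int32_t (&ne)[4], int32_t & nelements) {
    if (n_dims < 1 || n_dims > 4) {
        return false; // a 2-element array would overflow here for 3-D tensors
    }
    ne[0] = ne[1] = ne[2] = ne[3] = 1;
    nelements = 1;
    for (int i = 0; i < n_dims; ++i) {
        finp.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
        nelements *= ne[i];
    }
    return static_cast<bool>(finp);
}
```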
@@ -204,11 +204,11 @@ bool ggml_common_quantize_0(
                 total_size_new += cur_size;
 
                 printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0);
-                for (int i = 0; i < hist_cur.size(); ++i) {
+                for (int i = 0; i < (int) hist_cur.size(); ++i) {
                     hist_all[i] += hist_cur[i];
                 }
 
-                for (int i = 0; i < hist_cur.size(); ++i) {
+                for (int i = 0; i < (int) hist_cur.size(); ++i) {
                     printf("%5.3f ", hist_cur[i] / (float)nelements);
                 }
                 printf("\n");
@@ -226,12 +226,12 @@ bool ggml_common_quantize_0(
 
     {
         int64_t sum_all = 0;
-        for (int i = 0; i < hist_all.size(); ++i) {
+        for (int i = 0; i < (int) hist_all.size(); ++i) {
            sum_all += hist_all[i];
         }
 
         printf("%s: hist: ", __func__);
-        for (int i = 0; i < hist_all.size(); ++i) {
+        for (int i = 0; i < (int) hist_all.size(); ++i) {
            printf("%5.3f ", hist_all[i] / (float)sum_all);
         }
         printf("\n");
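The remaining two hunks only add `(int)` casts: `hist_cur.size()` and `hist_all.size()` return `size_t`, and comparing that against a signed `int` loop counter draws a `-Wsign-compare` warning on common compilers. A self-contained illustration of the pattern (not taken from the patch):

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    std::vector<int64_t> hist_all(16, 1);

    // size() returns size_t; casting it once keeps the loop counter signed
    // and the comparison free of signed/unsigned mismatch warnings.
    for (int i = 0; i < (int) hist_all.size(); ++i) {
        std::printf("%5.3f ", hist_all[i] / (float) hist_all.size());
    }
    std::printf("\n");
    return 0;
}
```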
ggml.c
@@ -1911,8 +1911,8 @@ static void dequantize_row_q5_0(const void * restrict vx, float * restrict y, in
         const uint8_t vi = pp[l/2];
 
         // extract the 5-th bit from qh
-        const uint8_t vh0 = ((qh & (1 << (l + 0))) >> (l + 0)) << 4;
-        const uint8_t vh1 = ((qh & (1 << (l + 1))) >> (l + 1)) << 4;
+        const uint8_t vh0 = ((qh & (1u << (l + 0))) >> (l + 0)) << 4;
+        const uint8_t vh1 = ((qh & (1u << (l + 1))) >> (l + 1)) << 4;
 
         const int8_t vi0 = (vi & 0x0F) | vh0;
         const int8_t vi1 = (vi >> 4) | vh1;
@@ -1948,8 +1948,8 @@ static void dequantize_row_q5_1(const void * restrict vx, float * restrict y, in
         const uint8_t vi = pp[l/2];
 
         // extract the 5-th bit from qh
-        const uint8_t vh0 = ((qh & (1 << (l + 0))) >> (l + 0)) << 4;
-        const uint8_t vh1 = ((qh & (1 << (l + 1))) >> (l + 1)) << 4;
+        const uint8_t vh0 = ((qh & (1u << (l + 0))) >> (l + 0)) << 4;
+        const uint8_t vh1 = ((qh & (1u << (l + 1))) >> (l + 1)) << 4;
 
         const uint8_t vi0 = (vi & 0x0F) | vh0;
         const uint8_t vi1 = (vi >> 4) | vh1;
@@ -3286,8 +3286,8 @@ static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void *
         for (int j = 0; j < QK8_0/2; j++) {
             const uint8_t v0 = x0[j];
 
-            const int x0_0h = ((qh & (1 << (2*j + 0))) >> (2*j + 0)) << 4;
-            const int x1_0h = ((qh & (1 << (2*j + 1))) >> (2*j + 1)) << 4;
+            const int x0_0h = ((qh & (1u << (2*j + 0))) >> (2*j + 0)) << 4;
+            const int x1_0h = ((qh & (1u << (2*j + 1))) >> (2*j + 1)) << 4;
 
             const int x0_0 = ((v0 & 0x0F) | x0_0h) - 16;
             const int x1_0 = ((v0 >> 4) | x1_0h) - 16;
@@ -3491,8 +3491,8 @@ static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void *
         for (int j = 0; j < QK8_1/2; j++) {
             const uint8_t v0 = x0[j];
 
-            const int x0_0h = ((qh & (1 << (2*j + 0))) >> (2*j + 0)) << 4;
-            const int x1_0h = ((qh & (1 << (2*j + 1))) >> (2*j + 1)) << 4;
+            const int x0_0h = ((qh & (1u << (2*j + 0))) >> (2*j + 0)) << 4;
+            const int x1_0h = ((qh & (1u << (2*j + 1))) >> (2*j + 1)) << 4;
 
             const int x0_0 = (v0 & 0x0F) | x0_0h;
             const int x1_0 = (v0 >> 4) | x1_0h;
@@ -13057,8 +13057,8 @@ size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t *
         memcpy(&qh, &y[i].qh, sizeof(qh));
 
         for (int l = 0; l < QK5_0; l += 2) {
-            const uint8_t vh0 = ((qh & (1 << (l + 0))) >> (l + 0)) << 4;
-            const uint8_t vh1 = ((qh & (1 << (l + 1))) >> (l + 1)) << 4;
+            const uint8_t vh0 = ((qh & (1u << (l + 0))) >> (l + 0)) << 4;
+            const uint8_t vh1 = ((qh & (1u << (l + 1))) >> (l + 1)) << 4;
 
             // cast to 16 bins
             const uint8_t vi0 = ((y[i].qs[l/2] & 0x0F) | vh0) / 2;
@@ -13087,8 +13087,8 @@ size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t *
         memcpy(&qh, &y[i].qh, sizeof(qh));
 
         for (int l = 0; l < QK5_1; l += 2) {
-            const uint8_t vh0 = ((qh & (1 << (l + 0))) >> (l + 0)) << 4;
-            const uint8_t vh1 = ((qh & (1 << (l + 1))) >> (l + 1)) << 4;
+            const uint8_t vh0 = ((qh & (1u << (l + 0))) >> (l + 0)) << 4;
+            const uint8_t vh1 = ((qh & (1u << (l + 1))) >> (l + 1)) << 4;
 
             // cast to 16 bins
             const uint8_t vi0 = ((y[i].qs[l/2] & 0x0F) | vh0) / 2;
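Every ggml.c hunk above makes the same one-character change: the bit-test mask is built from an unsigned literal (`1u`) instead of a signed `1`. The likely motivation is that the shift amount reaches 31 when testing the top bit of the 32-bit `qh` mask, and `1 << 31` overflows a 32-bit signed `int` (undefined behaviour in C), whereas `1u << 31` is well defined. A standalone illustration with names of my own choosing, not code from ggml:

```cpp
#include <cstdint>
#include <cstdio>

// Extract bit `pos` of a 32-bit mask and move it into bit 4 of the result,
// mirroring the high-bit handling of the Q5 formats in the diff above.
static uint8_t fifth_bit(uint32_t qh, int pos) {
    // 1u << 31 is well defined; 1 << 31 overflows a 32-bit signed int (UB).
    return (uint8_t) (((qh & (1u << pos)) >> pos) << 4);
}

int main() {
    const uint32_t qh = 0x80000001u; // bits 0 and 31 set
    std::printf("%d %d\n", fifth_bit(qh, 0), fifth_bit(qh, 31)); // prints "16 16"
    return 0;
}
```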
whisper.cpp
@@ -1333,7 +1333,7 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
         }
 
         int32_t nelements = 1;
-        int32_t ne[3] = { 1, 1, 1 };
+        int32_t ne[4] = { 1, 1, 1, 1 };
         for (int i = 0; i < n_dims; ++i) {
             read_safe(loader, ne[i]);
             nelements *= ne[i];
@@ -1352,6 +1352,8 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
         auto tensor = model.tensors[name.data()];
         if (ggml_nelements(tensor) != nelements) {
             fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
+            fprintf(stderr, "%s: shape: [%d, %d, %d], expected: [%d, %d, %d]\n",
+                    __func__, ne[0], ne[1], ne[2], (int) tensor->ne[0], (int) tensor->ne[1], (int) tensor->ne[2]);
             return false;
         }
 
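The second whisper.cpp hunk is purely diagnostic: on a size mismatch it now prints the shape read from the model file next to the shape of the tensor allocated from the hyperparameters, which makes conversion problems easier to spot. A rough, self-contained sketch of that reporting pattern, with invented names and no ggml dependency:

```cpp
#include <cstdint>
#include <cstdio>

// Sketch: compare an expected element count with the product of the
// dimensions read from a file, and print the file shape on mismatch.
static bool check_tensor_shape(const char * name, const int32_t ne[4], int64_t expected_nelements) {
    int64_t nelements = 1;
    for (int i = 0; i < 4; ++i) {
        nelements *= ne[i];
    }
    if (nelements != expected_nelements) {
        std::fprintf(stderr, "tensor '%s' has wrong size in model file\n", name);
        std::fprintf(stderr, "shape: [%d, %d, %d], expected %lld elements\n",
                     ne[0], ne[1], ne[2], (long long) expected_nelements);
        return false;
    }
    return true;
}

int main() {
    const int32_t ne[4] = { 2, 3, 1, 1 };
    return check_tensor_shape("example.weight", ne, 6) ? 0 : 1;
}
```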