mirror of https://github.com/ggerganov/whisper.cpp.git
synced 2025-01-13 09:28:35 +01:00

parallel : working

parent a272f10b2e
commit 0b2dc3c82c

Note: the mirror dropped the file headers for the first group of hunks below; judging by their content they belong to the examples (a CMakeLists.txt and the example main sources). The later hunks carry their file headers (ggml.h, whisper.cpp, whisper.h).
@@ -22,6 +22,7 @@ if (EMSCRIPTEN)
     add_subdirectory(whisper.wasm)
 else()
     add_subdirectory(main)
+    add_subdirectory(parallel)
     add_subdirectory(stream)
     add_subdirectory(bench)
 endif()
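Note: this registers a new `parallel` example target alongside `main`, `stream`, and `bench` for native (non-Emscripten) builds; the new example's own CMakeLists.txt and sources are not shown in this page's excerpt.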
@@ -384,7 +384,6 @@ int main(int argc, char ** argv) {
         wparams.translate = params.translate;
         wparams.language = params.language.c_str();
         wparams.n_threads = params.n_threads;
-        wparams.n_processors = 1;
         wparams.n_max_text_ctx = params.max_context >= 0 ? params.max_context : wparams.n_max_text_ctx;
         wparams.offset_ms = params.offset_t_ms;
 
@@ -38,10 +38,12 @@ std::string to_timestamp(int64_t t, bool comma = false) {
 
 // command-line parameters
 struct whisper_params {
     int32_t seed = -1; // RNG seed, not used currently
-    int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
+    int32_t n_threads = std::max(std::min(4, (int32_t) std::thread::hardware_concurrency()) / 2, 1);
+    int32_t n_processors = 2;
     int32_t offset_t_ms = 0;
     int32_t offset_n = 0;
+    int32_t max_context = -1;
 
     bool verbose = false;
     bool translate = false;
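With two processors becoming the default, the per-processor thread default is halved, presumably so the worker contexts do not oversubscribe the machine. A small standalone sketch of the arithmetic (the 8-core figure is just an example):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <thread>

    int main() {
        // e.g. hardware_concurrency() == 8: min(4, 8) = 4, 4/2 = 2, max(2, 1) = 2
        const int32_t hw           = (int32_t) std::thread::hardware_concurrency();
        const int32_t n_threads    = std::max(std::min(4, hw) / 2, 1);
        const int32_t n_processors = 2; // new default in this commit

        printf("%d processors x %d threads each\n", n_processors, n_threads);
        return 0;
    }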
@@ -73,10 +75,14 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
             params.seed = std::stoi(argv[++i]);
         } else if (arg == "-t" || arg == "--threads") {
             params.n_threads = std::stoi(argv[++i]);
+        } else if (arg == "-p" || arg == "--processors") {
+            params.n_processors = std::stoi(argv[++i]);
         } else if (arg == "-ot" || arg == "--offset-t") {
             params.offset_t_ms = std::stoi(argv[++i]);
         } else if (arg == "-on" || arg == "--offset-n") {
             params.offset_n = std::stoi(argv[++i]);
+        } else if (arg == "-mc" || arg == "--max-context") {
+            params.max_context = std::stoi(argv[++i]);
         } else if (arg == "-v" || arg == "--verbose") {
             params.verbose = true;
         } else if (arg == "--translate") {
@@ -125,8 +131,10 @@ void whisper_print_usage(int argc, char ** argv, const whisper_params & params)
     fprintf(stderr, "  -h,       --help           show this help message and exit\n");
     fprintf(stderr, "  -s SEED,  --seed SEED      RNG seed (default: -1)\n");
     fprintf(stderr, "  -t N,     --threads N      number of threads to use during computation (default: %d)\n", params.n_threads);
+    fprintf(stderr, "  -p N,     --processors N   number of processors to use during computation (default: %d)\n", params.n_processors);
     fprintf(stderr, "  -ot N,    --offset-t N     time offset in milliseconds (default: %d)\n", params.offset_t_ms);
     fprintf(stderr, "  -on N,    --offset-n N     segment index offset (default: %d)\n", params.offset_n);
+    fprintf(stderr, "  -mc N,    --max-context N  maximum number of text context tokens to store (default: max)\n");
     fprintf(stderr, "  -v,       --verbose        verbose output\n");
     fprintf(stderr, "            --translate      translate from source language to english\n");
     fprintf(stderr, "  -otxt,    --output-txt     output result in a text file\n");
@@ -359,8 +367,9 @@ int main(int argc, char ** argv) {
             fprintf(stderr, "%s: WARNING: model is not multilingual, ignoring language and translation options\n", __func__);
         }
     }
-    fprintf(stderr, "%s: processing '%s' (%d samples, %.1f sec), %d threads, lang = %s, task = %s, timestamps = %d ...\n",
-            __func__, fname_inp.c_str(), int(pcmf32.size()), float(pcmf32.size())/WHISPER_SAMPLE_RATE, params.n_threads,
+    fprintf(stderr, "%s: processing '%s' (%d samples, %.1f sec), %d threads, %d processors, lang = %s, task = %s, timestamps = %d ...\n",
+            __func__, fname_inp.c_str(), int(pcmf32.size()), float(pcmf32.size())/WHISPER_SAMPLE_RATE,
+            params.n_threads, params.n_processors,
             params.language.c_str(),
             params.translate ? "translate" : "transcribe",
             params.no_timestamps ? 0 : 1);
@@ -380,6 +389,7 @@ int main(int argc, char ** argv) {
         wparams.translate = params.translate;
         wparams.language = params.language.c_str();
         wparams.n_threads = params.n_threads;
+        wparams.n_max_text_ctx = params.max_context >= 0 ? params.max_context : wparams.n_max_text_ctx;
         wparams.offset_ms = params.offset_t_ms;
 
         // this callback is called on each new segment
@@ -388,7 +398,7 @@ int main(int argc, char ** argv) {
             wparams.new_segment_callback_user_data = &params;
         }
 
-        if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
+        if (whisper_full_parallel(ctx, wparams, pcmf32.data(), pcmf32.size(), params.n_processors) != 0) {
             fprintf(stderr, "%s: failed to process audio\n", argv[0]);
             return 8;
         }
ggml.h (2 changed lines)

@@ -11,7 +11,7 @@ extern "C" {
 #define GGML_MAX_DIMS     4
 #define GGML_MAX_NODES    4096
 #define GGML_MAX_PARAMS   16
-#define GGML_MAX_CONTEXTS 16
+#define GGML_MAX_CONTEXTS 64
 #define GGML_MAX_OPT      4
 
 #ifdef __ARM_NEON
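GGML_MAX_CONTEXTS caps how many ggml contexts can be live at once. Each worker context cloned by whisper_full_parallel below initializes its own memory context, so the cap is raised; 64 is presumably a comfortable margin rather than a derived limit.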
whisper.cpp (242 changed lines)
@@ -379,6 +379,7 @@ struct whisper_model {
 
     // context
     struct ggml_context * ctx;
+    struct ggml_context * ctx_mem;
 
     // tensors
     int n_loaded;
@@ -393,9 +394,10 @@ struct whisper_context {
     int64_t t_decode_us = 0;
     int64_t t_start_us = 0;
 
-    std::vector<uint8_t> buf_model;
+    std::vector<uint8_t> * buf_model; // the model buffer is read-only and can be shared between processors
+    std::vector<uint8_t>   buf_memory;
     std::vector<uint8_t>   buf_compute;
     std::vector<uint8_t>   buf_compute_layer;
 
     whisper_model model;
     whisper_vocab vocab;
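Turning buf_model into a heap-allocated pointer is what lets whisper_full_parallel clone a context cheaply: the shallow copy shares the read-only weight buffer, while value members such as buf_memory are duplicated per clone. A toy stand-in for the idea (not the actual whisper_context definition):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // simplified stand-in: weights behind a shared pointer, KV scratch by value
    struct toy_context {
        std::vector<uint8_t> * buf_model;  // read-only, shared between processors
        std::vector<uint8_t>   buf_memory; // per-processor KV scratch
    };

    int main() {
        toy_context ctx;
        ctx.buf_model = new std::vector<uint8_t>(1024);
        ctx.buf_memory.resize(256);

        toy_context worker = ctx; // shallow copy, as in `ctxs[i] = *ctx`
        assert(worker.buf_model == ctx.buf_model);                 // same weights
        assert(worker.buf_memory.data() != ctx.buf_memory.data()); // distinct scratch

        delete ctx.buf_model;
        return 0;
    }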
@@ -421,7 +423,7 @@ struct whisper_context {
 //
 // see the convert-pt-to-ggml.py script for details
 //
-bool whisper_model_load(const std::string & fname, const int n_processors, whisper_context & wctx) {
+bool whisper_model_load(const std::string & fname, whisper_context & wctx) {
     fprintf(stderr, "%s: loading model from '%s'\n", __func__, fname.c_str());
 
     auto & model = wctx.model;
@@ -494,13 +496,16 @@ bool whisper_model_load(const std::string & fname, const int n_processors, whisper_context & wctx) {
     fprintf(stderr, "%s: f16  = %d\n", __func__, hparams.f16);
     fprintf(stderr, "%s: type = %d\n", __func__, model.type);
 
-    wctx.buf_model.resize(MEM_REQ_MODEL.at(model.type));
+    wctx.buf_model = new std::vector<uint8_t>();
+    wctx.buf_model->resize(MEM_REQ_MODEL.at(model.type));
+    wctx.buf_memory.resize(std::max(MEM_REQ_MODEL.at(model.type), MEM_REQ_MODEL.at(model.type))); // TODO: TMP !!!
     wctx.buf_compute.resize(std::max(MEM_REQ_ENCODE.at(model.type), MEM_REQ_DECODE.at(model.type)));
     wctx.buf_compute_layer.resize(std::max(MEM_REQ_ENCODE_LAYER.at(model.type), MEM_REQ_DECODE_LAYER.at(model.type)));
 
     // this is the total memory required to run the inference
     const size_t mem_required =
-        wctx.buf_model.size() +
+        wctx.buf_model->size() +
+        wctx.buf_memory.size() +
         wctx.buf_compute.size() +
         wctx.buf_compute_layer.size();
 
@@ -583,6 +588,7 @@ bool whisper_model_load(const std::string & fname, const int n_processors, whisper_context & wctx) {
 
 
     size_t ctx_size = 0;
+    size_t ctx_mem_size = 0;
 
     {
         const auto & hparams = model.hparams;
@@ -691,11 +697,11 @@ bool whisper_model_load(const std::string & fname, const int n_processors, whisper_context & wctx) {
         ctx_size += n_text_layer*( n_text_state*ggml_type_size(GGML_TYPE_F32)); // cross_attn_ln_1_b
     }
 
-    ctx_size += n_processors*n_text_layer*n_text_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_k
-    ctx_size += n_processors*n_text_layer*n_text_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_v
+    ctx_mem_size += n_text_layer*n_text_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_k
+    ctx_mem_size += n_text_layer*n_text_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_v
 
-    ctx_size += n_processors*n_text_layer*n_audio_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_cross_k
-    ctx_size += n_processors*n_text_layer*n_audio_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_cross_v
+    ctx_mem_size += n_text_layer*n_audio_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_cross_k
+    ctx_mem_size += n_text_layer*n_audio_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_cross_v
 
     ctx_size += (15 + 15*n_audio_layer + 24*n_text_layer)*256; // object overhead
 
@@ -705,8 +711,8 @@ bool whisper_model_load(const std::string & fname, const int n_processors, whisper_context & wctx) {
     // create the ggml context
     {
         struct ggml_init_params params = {
-            .mem_size   = wctx.buf_model.size(),
-            .mem_buffer = wctx.buf_model.data(),
+            .mem_size   = wctx.buf_model->size(),
+            .mem_buffer = wctx.buf_model->data(),
         };
 
         model.ctx = ggml_init(params);
@@ -716,6 +722,20 @@ bool whisper_model_load(const std::string & fname, const int n_processors, whisper_context & wctx) {
         }
     }
 
+    // create the ggml memory context
+    {
+        struct ggml_init_params params = {
+            .mem_size   = wctx.buf_memory.size(),
+            .mem_buffer = wctx.buf_memory.data(),
+        };
+
+        model.ctx_mem = ggml_init(params);
+        if (!model.ctx_mem) {
+            fprintf(stderr, "%s: ggml_init() failed\n", __func__);
+            return false;
+        }
+    }
+
     // prepare memory for the weights
     {
         auto & ctx = model.ctx;
@@ -914,7 +934,7 @@ bool whisper_model_load(const std::string & fname, const int n_processors, whisper_context & wctx) {
 
     // key + value memory
     {
-        auto & ctx = model.ctx;
+        auto & ctx = model.ctx_mem;
 
         const auto & hparams = model.hparams;
 
@@ -925,7 +945,7 @@ bool whisper_model_load(const std::string & fname, const int n_processors, whisper_context & wctx) {
         // key/value memory for the self-attention layer
         {
             const int n_mem = n_text_layer*n_text_ctx;
-            const int n_elements = n_text_state*n_mem*n_processors;
+            const int n_elements = n_text_state*n_mem;
 
             model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
             model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
@@ -936,7 +956,7 @@ bool whisper_model_load(const std::string & fname, const int n_processors, whisper_context & wctx) {
             const int n_audio_ctx = hparams.n_audio_ctx;
 
             const int n_mem = n_text_layer*n_audio_ctx;
-            const int n_elements = n_text_state*n_mem*n_processors;
+            const int n_elements = n_text_state*n_mem;
 
             model.memory_cross_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
             model.memory_cross_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
@@ -946,7 +966,7 @@ bool whisper_model_load(const std::string & fname, const int n_processors, whisper_context & wctx) {
             ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v) +
             ggml_nbytes(model.memory_cross_k) + ggml_nbytes(model.memory_cross_v);
 
-        fprintf(stderr, "%s: memory size = %8.2f MB (%d processors)\n", __func__, memory_size/1024.0/1024.0, n_processors);
+        fprintf(stderr, "%s: memory size = %8.2f MB\n", __func__, memory_size/1024.0/1024.0);
     }
 
     // load weights
@@ -1037,8 +1057,7 @@ bool whisper_model_load(const std::string & fname, const int n_processors, whisper_context & wctx) {
 bool whisper_encode(
         whisper_context & wctx,
         const int n_threads,
-        const int mel_offset,
-        const int processor_id) {
+        const int mel_offset) {
     const auto & model = wctx.model;
     const auto & mel_inp = wctx.mel;
     const auto & hparams = model.hparams;
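With each worker context now owning its own KV tensors (set up in whisper_full_parallel below), the encoder and decoder no longer need a processor_id to index into a shared slab; the per-processor offsets disappear from all the ggml_view_1d calls in the hunks that follow.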
@@ -1392,11 +1411,8 @@ bool whisper_encode(
                         Vcross),
                 Vcross);
 
-        const size_t offset_k = processor_id*(ggml_element_size(model.memory_cross_k)*n_state)*(model.hparams.n_text_layer*n_ctx);
-        const size_t offset_v = processor_id*(ggml_element_size(model.memory_cross_v)*n_state)*(model.hparams.n_text_layer*n_ctx);
-
-        struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_cross_k, n_state*n_ctx, offset_k + (ggml_element_size(model.memory_cross_k)*n_state)*(il*n_ctx));
-        struct ggml_tensor * v = ggml_view_1d(ctx0, model.memory_cross_v, n_state*n_ctx, offset_v + (ggml_element_size(model.memory_cross_v)*n_state)*(il*n_ctx));
+        struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_cross_k, n_state*n_ctx, (ggml_element_size(model.memory_cross_k)*n_state)*(il*n_ctx));
+        struct ggml_tensor * v = ggml_view_1d(ctx0, model.memory_cross_v, n_state*n_ctx, (ggml_element_size(model.memory_cross_v)*n_state)*(il*n_ctx));
 
         ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcross, k));
         ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcross, v));
@@ -1429,8 +1445,7 @@ bool whisper_decode(
         const int n_threads,
         const whisper_token * tokens,
         const int n_tokens,
-        const int n_past,
-        const int processor_id) {
+        const int n_past) {
     const auto & model = wctx.model;
     const auto & hparams = model.hparams;
 
@@ -1525,13 +1540,10 @@ bool whisper_decode(
                         Vcur),
                 Vcur);
 
-            const size_t offset_k = processor_id*(ggml_element_size(model.memory_k)*n_state)*(n_layer*n_ctx);
-            const size_t offset_v = processor_id*(ggml_element_size(model.memory_v)*n_state)*(n_layer*n_ctx);
-
             // store key and value to memory
             {
-                struct ggml_tensor * k = ggml_view_1d(ctxL, model.memory_k, N*n_state, offset_k + (ggml_element_size(model.memory_k)*n_state)*(il*n_ctx + n_past));
-                struct ggml_tensor * v = ggml_view_1d(ctxL, model.memory_v, N*n_state, offset_v + (ggml_element_size(model.memory_v)*n_state)*(il*n_ctx + n_past));
+                struct ggml_tensor * k = ggml_view_1d(ctxL, model.memory_k, N*n_state, (ggml_element_size(model.memory_k)*n_state)*(il*n_ctx + n_past));
+                struct ggml_tensor * v = ggml_view_1d(ctxL, model.memory_v, N*n_state, (ggml_element_size(model.memory_v)*n_state)*(il*n_ctx + n_past));
 
                 ggml_build_forward_expand(&gf, ggml_cpy(ctxL, Kcur, k));
                 ggml_build_forward_expand(&gf, ggml_cpy(ctxL, Vcur, v));
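The byte offsets passed to ggml_view_1d select one layer's slice of the flat KV tensor: each layer owns n_ctx rows of n_state half-precision values. A standalone sketch of the arithmetic (the base-model sizes used here are illustrative assumptions):

    #include <cstddef>
    #include <cstdio>

    int main() {
        const size_t elem    = 2;   // bytes per F16 element
        const size_t n_state = 512; // text state size (e.g. the base model)
        const size_t n_ctx   = 448; // text context length
        const int    il      = 3;   // layer index
        const int    n_past  = 7;   // tokens already stored for this layer

        // same shape as: (ggml_element_size(k)*n_state)*(il*n_ctx + n_past)
        const size_t offset = elem*n_state*(il*n_ctx + n_past);
        printf("layer %d, position %d -> byte offset %zu\n", il, n_past, offset);
        return 0;
    }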
@@ -1549,7 +1561,7 @@ bool whisper_decode(
             struct ggml_tensor * K =
                 ggml_permute(ctxL,
                         ggml_reshape_3d(ctxL,
-                            ggml_view_1d(ctxL, model.memory_k, (n_past + N)*n_state, offset_k + il*n_ctx*ggml_element_size(model.memory_k)*n_state),
+                            ggml_view_1d(ctxL, model.memory_k, (n_past + N)*n_state, il*n_ctx*ggml_element_size(model.memory_k)*n_state),
                             n_state/n_head, n_head, n_past + N),
                         0, 2, 1, 3);
 
@@ -1569,7 +1581,7 @@ bool whisper_decode(
             struct ggml_tensor * V_trans =
                 ggml_permute(ctxL,
                         ggml_reshape_3d(ctxL,
-                            ggml_view_1d(ctxL, model.memory_v, (n_past + N)*n_state, offset_v + il*n_ctx*ggml_element_size(model.memory_v)*n_state),
+                            ggml_view_1d(ctxL, model.memory_v, (n_past + N)*n_state, il*n_ctx*ggml_element_size(model.memory_v)*n_state),
                             n_state/n_head, n_head, n_past + N),
                         1, 2, 0, 3);
 
@@ -1621,18 +1633,15 @@ bool whisper_decode(
 
             Qcur = ggml_scale(ctxL, Qcur, ggml_new_f32(ctxL, pow(float(n_state)/n_head, -0.25)));
 
-            const size_t offset_k = processor_id*(ggml_element_size(model.memory_cross_k)*n_state)*(n_layer*M);
-            const size_t offset_v = processor_id*(ggml_element_size(model.memory_cross_v)*n_state)*(n_layer*M);
-
             // Kcross is already scaled
             struct ggml_tensor * Kcross =
                 ggml_reshape_3d(ctxL,
-                        ggml_view_1d(ctxL, model.memory_cross_k, M*n_state, offset_k + il*M*ggml_element_size(model.memory_cross_k)*n_state),
+                        ggml_view_1d(ctxL, model.memory_cross_k, M*n_state, il*M*ggml_element_size(model.memory_cross_k)*n_state),
                         n_state/n_head, n_head, M);
 
             struct ggml_tensor * Vcross =
                 ggml_reshape_3d(ctxL,
-                        ggml_view_1d(ctxL, model.memory_cross_v, M*n_state, offset_v + il*M*ggml_element_size(model.memory_cross_v)*n_state),
+                        ggml_view_1d(ctxL, model.memory_cross_v, M*n_state, il*M*ggml_element_size(model.memory_cross_v)*n_state),
                         n_state/n_head, n_head, M);
 
             // ------
@@ -2118,26 +2127,7 @@ struct whisper_context * whisper_init(const char * path_model) {
 
     ctx->t_start_us = t_start_us;
 
-    if (!whisper_model_load(path_model, 1, *ctx)) {
-        fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, path_model);
-        return NULL;
-    }
-
-    ctx->t_load_us = ggml_time_us() - t_start_us;
-
-    return ctx;
-}
-
-struct whisper_context * whisper_init_parallel(const char * path_model, int n_processors) {
-    ggml_time_init();
-
-    whisper_context * ctx = new whisper_context;
-
-    const int64_t t_start_us = ggml_time_us();
-
-    ctx->t_start_us = t_start_us;
-
-    if (!whisper_model_load(path_model, n_processors, *ctx)) {
+    if (!whisper_model_load(path_model, *ctx)) {
         fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, path_model);
         return NULL;
     }
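The separate whisper_init_parallel entry point is dropped here: model loading no longer depends on the processor count, and parallelism is instead requested per call through whisper_full_parallel further below.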
@@ -2149,6 +2139,9 @@ struct whisper_context * whisper_init_parallel(const char * path_model, int n_processors) {
 
 void whisper_free(struct whisper_context * ctx) {
     if (ctx) {
+        if (ctx->buf_model) {
+            delete ctx->buf_model;
+        }
         delete ctx;
     }
 }
@@ -2188,7 +2181,7 @@ int whisper_set_mel(
 int whisper_encode(struct whisper_context * ctx, int offset, int n_threads) {
     const int64_t t_start_us = ggml_time_us();
 
-    if (!whisper_encode(*ctx, n_threads, offset, 0)) {
+    if (!whisper_encode(*ctx, n_threads, offset)) {
         fprintf(stderr, "%s: failed to eval\n", __func__);
         return -1;
     }
@@ -2201,7 +2194,7 @@ int whisper_encode(struct whisper_context * ctx, int offset, int n_threads) {
 int whisper_decode(struct whisper_context * ctx, const whisper_token * tokens, int n_tokens, int n_past, int n_threads) {
     const int64_t t_start_us = ggml_time_us();
 
-    if (!whisper_decode(*ctx, n_threads, tokens, n_tokens, n_past, 0)) {
+    if (!whisper_decode(*ctx, n_threads, tokens, n_tokens, n_past)) {
         fprintf(stderr, "%s: failed to eval\n", __func__);
         return 1;
     }
@@ -2322,7 +2315,6 @@ struct whisper_full_params whisper_full_default_params(enum whisper_sampling_strategy strategy) {
                     /*.strategy =*/ WHISPER_SAMPLING_GREEDY,
 
                     /*.n_threads =*/ std::min(4, (int32_t) std::thread::hardware_concurrency()),
-                    /*.n_processors =*/ 1,
                     /*.n_max_text_ctx =*/ 16384,
                     /*.offset_ms =*/ 0,
 
@@ -2355,7 +2347,6 @@ struct whisper_full_params whisper_full_default_params(enum whisper_sampling_strategy strategy) {
                     /*.strategy =*/ WHISPER_SAMPLING_BEAM_SEARCH,
 
                     /*.n_threads =*/ std::min(4, (int32_t) std::thread::hardware_concurrency()),
-                    /*.n_processors =*/ 1,
                     /*.n_max_text_ctx =*/ 16384,
                     /*.offset_ms =*/ 0,
 
@@ -2629,6 +2620,135 @@ int whisper_full(
     return 0;
 }
 
+int whisper_full_parallel(
+        struct whisper_context * ctx,
+        struct whisper_full_params params,
+        const float * samples,
+        int n_samples,
+        const int n_processors) {
+    if (n_processors == 1) {
+        return whisper_full(ctx, params, samples, n_samples);
+    }
+
+    int ret = 0;
+
+    // prepare separate contexts for each thread
+    std::vector<struct whisper_context> ctxs(n_processors - 1);
+
+    for (int i = 0; i < n_processors - 1; ++i) {
+        ctxs[i] = *ctx;
+
+        auto & model = ctxs[i].model;
+
+        // create the ggml memory context
+        {
+            struct ggml_init_params params = {
+                .mem_size   = ctxs[i].buf_memory.size(),
+                .mem_buffer = ctxs[i].buf_memory.data(),
+            };
+
+            model.ctx_mem = ggml_init(params);
+            if (!model.ctx_mem) {
+                fprintf(stderr, "%s: ggml_init() failed\n", __func__);
+                return false;
+            }
+        }
+
+        // separate key + value memory for each processor
+        {
+            auto & ctx = model.ctx_mem;
+
+            const auto & hparams = model.hparams;
+
+            const int n_text_state = hparams.n_text_state;
+            const int n_text_layer = hparams.n_text_layer;
+            const int n_text_ctx   = hparams.n_text_ctx;
+
+            // key/value memory for the self-attention layer
+            {
+                const int n_mem      = n_text_layer*n_text_ctx;
+                const int n_elements = n_text_state*n_mem;
+
+                model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
+                model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
+            }
+
+            // key/value memory for the cross-attention layer
+            {
+                const int n_audio_ctx = hparams.n_audio_ctx;
+
+                const int n_mem      = n_text_layer*n_audio_ctx;
+                const int n_elements = n_text_state*n_mem;
+
+                model.memory_cross_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
+                model.memory_cross_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
+            }
+
+            const size_t memory_size =
+                ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v) +
+                ggml_nbytes(model.memory_cross_k) + ggml_nbytes(model.memory_cross_v);
+        }
+    }
+
+    const int offset_samples = (WHISPER_SAMPLE_RATE*params.offset_ms)/1000;
+    const int n_samples_per_processor = (n_samples - offset_samples)/n_processors;
+
+    // the calling thread will process the first chunk
+    // while the other threads will process the remaining chunks
+
+    std::vector<std::thread> workers(n_processors - 1);
+    for (int i = 0; i < n_processors - 1; ++i) {
+        const int start_samples = offset_samples + (i + 1)*n_samples_per_processor;
+        const int n_samples_cur = (i == n_processors - 2) ? n_samples - start_samples : n_samples_per_processor;
+
+        auto params_cur = params;
+
+        params_cur.offset_ms = 0;
+        params_cur.print_progress = false;
+        params_cur.print_realtime = false;
+
+        params_cur.new_segment_callback = nullptr;
+        params_cur.new_segment_callback_user_data = nullptr;
+
+        workers[i] = std::thread(whisper_full, &ctxs[i], std::move(params_cur), samples + start_samples, n_samples_cur);
+    }
+
+    {
+        auto params_cur = params;
+
+        ret = whisper_full(ctx, std::move(params_cur), samples, offset_samples + n_samples_per_processor);
+    }
+
+    for (int i = 0; i < n_processors - 1; ++i) {
+        workers[i].join();
+    }
+
+    const int64_t offset_t = (int64_t) params.offset_ms/10.0;
+
+    // combine results into ctx->result_all
+    for (int i = 0; i < n_processors - 1; ++i) {
+        auto & result_all = ctxs[i].result_all;
+
+        for (int j = 0; j < (int) result_all.size(); ++j) {
+            result_all[j].t0 += 100*((i + 1)*n_samples_per_processor)/WHISPER_SAMPLE_RATE + offset_t;
+            result_all[j].t1 += 100*((i + 1)*n_samples_per_processor)/WHISPER_SAMPLE_RATE + offset_t;
+
+            if (ctx->result_all.size() > 0) {
+                result_all[j].t0 = std::max(result_all[j].t0, ctx->result_all.back().t1);
+            }
+
+            ctx->result_all.push_back(std::move(result_all[j]));
+
+            // call the new_segment_callback for each segment
+            if (params.new_segment_callback) {
+                params.new_segment_callback(ctx, params.new_segment_callback_user_data);
+            }
+        }
+    }
+
+    return ret;
+}
+
 int whisper_full_n_segments(struct whisper_context * ctx) {
     return ctx->result_all.size();
 }
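Segment timestamps t0/t1 are in units of 10 ms, which is why a chunk's sample offset is converted with 100*samples/WHISPER_SAMPLE_RATE when the worker results are merged. A standalone check of the shift applied to worker i's segments (the values here are illustrative):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int WHISPER_SAMPLE_RATE       = 16000;  // 16 kHz input
        const int n_samples_per_processor   = 160000; // e.g. 10 s per chunk
        const int i                         = 0;      // first worker -> second chunk
        const int64_t offset_t              = 0;      // params.offset_ms/10

        // worker i transcribes samples starting at (i + 1)*n_samples_per_processor,
        // so its local timestamps are shifted by that many centiseconds:
        const int64_t shift = 100LL*((i + 1)*n_samples_per_processor)/WHISPER_SAMPLE_RATE + offset_t;
        printf("shift = %lld cs (= %.1f s)\n", (long long) shift, shift/100.0);
        return 0;
    }

The std::max clamp against ctx->result_all.back().t1 keeps the merged segment start times monotonic across chunk boundaries.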
whisper.h (10 changed lines)
@@ -80,8 +80,6 @@ extern "C" {
     // Returns NULL on failure.
     WHISPER_API struct whisper_context * whisper_init(const char * path_model);
 
-    WHISPER_API struct whisper_context * whisper_init_parallel(const char * path_model, int n_processors);
-
     // Frees all memory allocated by the model.
     WHISPER_API void whisper_free(struct whisper_context * ctx);
 
@@ -179,7 +177,6 @@ extern "C" {
         enum whisper_sampling_strategy strategy;
 
         int n_threads;
-        int n_processors;
         int n_max_text_ctx;
         int offset_ms;
 
@@ -216,6 +213,13 @@ extern "C" {
             const float * samples,
             int n_samples);
 
+    WHISPER_API int whisper_full_parallel(
+            struct whisper_context * ctx,
+            struct whisper_full_params params,
+            const float * samples,
+            int n_samples,
+            const int n_processors);
+
     // Number of generated text segments.
     // A segment can be a few words, a sentence, or even a paragraph.
     WHISPER_API int whisper_full_n_segments(struct whisper_context * ctx);
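A hedged usage sketch of the new entry point, written in the style of the examples (the model path below is illustrative and error handling is minimal):

    #include <cstdio>
    #include <vector>

    #include "whisper.h"

    int transcribe(const std::vector<float> & pcmf32) {
        struct whisper_context * ctx = whisper_init("models/ggml-base.en.bin"); // example path

        if (ctx == NULL) {
            return 1;
        }

        struct whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
        wparams.n_threads = 2;

        // with n_processors == 2 the audio is split into two chunks transcribed
        // concurrently; n_processors == 1 falls back to plain whisper_full()
        if (whisper_full_parallel(ctx, wparams, pcmf32.data(), pcmf32.size(), 2) != 0) {
            whisper_free(ctx);
            return 1;
        }

        for (int i = 0; i < whisper_full_n_segments(ctx); ++i) {
            printf("%s", whisper_full_get_segment_text(ctx, i));
        }
        printf("\n");

        whisper_free(ctx);
        return 0;
    }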