forked from extern/whisper.cpp
Compare commits
17 commits, diarizatio...master

Commit SHAs:
09e9068007, fa9d43181f, bb6b54a03d, b597c5a779, a3fb6c507f, 59fdcd19c8,
478289a4b3, 5e94129cb2, 72af0f5697, af005d573f, ad1389003d, f420de1322,
d176160f6f, ca21f7ab16, 373043cabe, fb4d0d470f, 0d229163bb

.gitignore (vendored): 1 change

@@ -10,6 +10,7 @@ build-em/
 build-debug/
 build-release/
 build-static/
+build-no-accel/
 build-sanitize-addr/
 build-sanitize-thread/

CMakeLists.txt

@@ -1,6 +1,6 @@
 cmake_minimum_required (VERSION 3.0)
 
-project(whisper.cpp VERSION 1.2.0)
+project(whisper.cpp VERSION 1.2.1)
 
 # Add path to modules
 list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")

Makefile: 6 changes

@@ -30,8 +30,8 @@ endif
 # Compile flags
 #
 
-CFLAGS = -I. -O3 -std=c11 -fPIC
-CXXFLAGS = -I. -I./examples -O3 -std=c++11 -fPIC
+CFLAGS = -I. -O3 -DNDEBUG -std=c11 -fPIC
+CXXFLAGS = -I. -I./examples -O3 -DNDEBUG -std=c++11 -fPIC
 LDFLAGS =
 
 # OS specific

@@ -141,6 +141,8 @@ ifdef WHISPER_GPROF
 CXXFLAGS += -pg
 endif
 ifneq ($(filter aarch64%,$(UNAME_M)),)
+CFLAGS += -mcpu=native
+CXXFLAGS += -mcpu=native
 endif
 ifneq ($(filter armv6%,$(UNAME_M)),)
 # Raspberry Pi 1, 2, 3

README.md: 19 changes

@@ -4,7 +4,7 @@
 [](https://opensource.org/licenses/MIT)
 [](https://www.npmjs.com/package/whisper.cpp/)
 
-Stable: [v1.2.0](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.2.0) / [Roadmap | F.A.Q.](https://github.com/ggerganov/whisper.cpp/discussions/126)
+Stable: [v1.2.1](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.2.1) / [Roadmap | F.A.Q.](https://github.com/ggerganov/whisper.cpp/discussions/126)
 
 High-performance inference of [OpenAI's Whisper](https://github.com/openai/whisper) automatic speech recognition (ASR) model:
 

@@ -433,6 +433,19 @@ https://user-images.githubusercontent.com/1991296/199337538-b7b0c7a3-2753-4a88-a
 
 ---
 
+## Video comparison of different models
+
+Use the [extra/bench-wts.sh](https://github.com/ggerganov/whisper.cpp/blob/master/extra/bench-wts.sh) script to generate a video in the following format:
+
+```java
+./extra/bench-wts.sh samples/jfk.wav
+ffplay ./samples/jfk.wav.all.mp4
+```
+
+https://user-images.githubusercontent.com/1991296/223206245-2d36d903-cf8e-4f09-8c3b-eb9f9c39d6fc.mp4
+
+---
+
 ## Benchmarks
 
 In order to have an objective comparison of the performance of the inference across different system configurations,

@@ -469,7 +482,9 @@ in [models](models).
 - [X] .NET: | [#422](https://github.com/ggerganov/whisper.cpp/discussions/422)
   - [sandrohanea/whisper.net](https://github.com/sandrohanea/whisper.net)
   - [NickDarvey/whisper](https://github.com/NickDarvey/whisper)
-- [ ] Python: soon | [WIP](https://github.com/ggerganov/whisper.cpp/issues/9)
+- [X] Python: | [#9](https://github.com/ggerganov/whisper.cpp/issues/9)
+  - [stlukey/whispercpp.py](https://github.com/stlukey/whispercpp.py) (Cython)
+  - [aarnphm/whispercpp](https://github.com/aarnphm/whispercpp) (Pybind11)
 
 ## Examples
 

@@ -94,6 +94,7 @@ func (model *model) NewContext() (Context, error) {
     params.SetPrintRealtime(false)
     params.SetPrintTimestamps(false)
     params.SetThreads(runtime.NumCPU())
+    params.SetNoContext(true)
 
     // Return new context
     return newContext(model, params)

@@ -20,7 +20,7 @@ extern bool callEncoderBegin(void* user_data);
 // Text segment callback
 // Called on every newly generated text segment
 // Use the whisper_full_...() functions to obtain the text segments
-static void whisper_new_segment_cb(struct whisper_context* ctx, int n_new, void* user_data) {
+static void whisper_new_segment_cb(struct whisper_context* ctx, struct whisper_state* state, int n_new, void* user_data) {
     if(user_data != NULL && ctx != NULL) {
         callNewSegment(user_data, n_new);
     }

@@ -29,7 +29,7 @@ static void whisper_new_segment_cb(struct whisper_context* ctx, int n_new, void*
 // Encoder begin callback
 // If not NULL, called before the encoder starts
 // If it returns false, the computation is aborted
-static bool whisper_encoder_begin_cb(struct whisper_context* ctx, void* user_data) {
+static bool whisper_encoder_begin_cb(struct whisper_context* ctx, struct whisper_state* state, void* user_data) {
     if(user_data != NULL && ctx != NULL) {
         return callEncoderBegin(user_data);
     }

@@ -1 +1 @@
-Subproject commit d5c6d5c8a39703153472055c13902defc7177d22
+Subproject commit 92d4c5c9a07b726e35c20dc513532789919e00c4

package.json

@@ -1,6 +1,6 @@
 {
   "name": "whisper.cpp",
-  "version": "1.2.0",
+  "version": "1.2.1",
   "description": "Whisper speech recognition",
   "main": "whisper.js",
   "scripts": {

@@ -199,7 +199,7 @@ static VALUE ruby_whisper_transcribe(int argc, VALUE *argv, VALUE self) {
   {
     static bool is_aborted = false; // NOTE: this should be atomic to avoid data race
 
-    rwp->params.encoder_begin_callback = [](struct whisper_context * /*ctx*/, void * user_data) {
+    rwp->params.encoder_begin_callback = [](struct whisper_context * /*ctx*/, struct whisper_state * /*state*/, void * user_data) {
       bool is_aborted = *(bool*)user_data;
       return !is_aborted;
     };

@@ -72,7 +72,7 @@ int timestamp_to_sample(int64_t t, int n_samples) {
     return std::max(0, std::min((int) n_samples - 1, (int) ((t*WHISPER_SAMPLE_RATE)/100)));
 }
 
-void whisper_print_segment_callback(struct whisper_context * ctx, int n_new, void * user_data) {
+void whisper_print_segment_callback(struct whisper_context * ctx, struct whisper_state * state, int n_new, void * user_data) {
     const auto & params = *((whisper_print_user_data *) user_data)->params;
     const auto & pcmf32s = *((whisper_print_user_data *) user_data)->pcmf32s;
 

@@ -260,7 +260,7 @@ int run(whisper_params &params, std::vector<std::vector<std::string>> &result) {
     {
         static bool is_aborted = false; // NOTE: this should be atomic to avoid data race
 
-        wparams.encoder_begin_callback = [](struct whisper_context * /*ctx*/, void * user_data) {
+        wparams.encoder_begin_callback = [](struct whisper_context * /*ctx*/, struct whisper_state * /*state*/, void * user_data) {
             bool is_aborted = *(bool*)user_data;
             return !is_aborted;
         };

@@ -80,6 +80,7 @@ struct whisper_params {
 
     std::string language = "en";
     std::string prompt;
+    std::string font_path = "/System/Library/Fonts/Supplemental/Courier New Bold.ttf";
     std::string model = "models/ggml-base.en.bin";
 
     std::vector<std::string> fname_inp = {};

@@ -127,6 +128,7 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
     else if (arg == "-ovtt" || arg == "--output-vtt") { params.output_vtt = true; }
     else if (arg == "-osrt" || arg == "--output-srt") { params.output_srt = true; }
     else if (arg == "-owts" || arg == "--output-words") { params.output_wts = true; }
+    else if (arg == "-fp" || arg == "--font-path") { params.font_path = argv[++i]; }
     else if (arg == "-ocsv" || arg == "--output-csv") { params.output_csv = true; }
     else if (arg == "-of" || arg == "--output-file") { params.fname_out.emplace_back(argv[++i]); }
     else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; }

@@ -174,6 +176,7 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
     fprintf(stderr, " -ovtt, --output-vtt [%-7s] output result in a vtt file\n", params.output_vtt ? "true" : "false");
     fprintf(stderr, " -osrt, --output-srt [%-7s] output result in a srt file\n", params.output_srt ? "true" : "false");
     fprintf(stderr, " -owts, --output-words [%-7s] output script for generating karaoke video\n", params.output_wts ? "true" : "false");
+    fprintf(stderr, " -fp, --font-path [%-7s] path to a monospace font for karaoke video\n", params.font_path.c_str());
     fprintf(stderr, " -ocsv, --output-csv [%-7s] output result in a CSV file\n", params.output_csv ? "true" : "false");
     fprintf(stderr, " -of FNAME, --output-file FNAME [%-7s] output file path (without file extension)\n", "");
     fprintf(stderr, " -ps, --print-special [%-7s] print special tokens\n", params.print_special ? "true" : "false");

@@ -193,7 +196,7 @@ struct whisper_print_user_data {
     const std::vector<std::vector<float>> * pcmf32s;
 };
 
-void whisper_print_segment_callback(struct whisper_context * ctx, int n_new, void * user_data) {
+void whisper_print_segment_callback(struct whisper_context * ctx, struct whisper_state * /*state*/, int n_new, void * user_data) {
     const auto & params = *((whisper_print_user_data *) user_data)->params;
     const auto & pcmf32s = *((whisper_print_user_data *) user_data)->pcmf32s;
 

@@ -352,13 +355,14 @@ bool output_csv(struct whisper_context * ctx, const char * fname) {
     fprintf(stderr, "%s: saving output to '%s'\n", __func__, fname);
 
     const int n_segments = whisper_full_n_segments(ctx);
+    fout << "start,end,text\n";
     for (int i = 0; i < n_segments; ++i) {
         const char * text = whisper_full_get_segment_text(ctx, i);
         const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
         const int64_t t1 = whisper_full_get_segment_t1(ctx, i);
 
         //need to multiply times returned from whisper_full_get_segment_t{0,1}() by 10 to get milliseconds.
-        fout << 10 * t0 << ", " << 10 * t1 << ", \"" << text << "\"\n";
+        fout << 10 * t0 << "," << 10 * t1 << ",\"" << text << "\"\n";
     }
 
     return true;

@@ -367,13 +371,18 @@ bool output_csv(struct whisper_context * ctx, const char * fname) {
 // karaoke video generation
 // outputs a bash script that uses ffmpeg to generate a video with the subtitles
 // TODO: font parameter adjustments
-bool output_wts(struct whisper_context * ctx, const char * fname, const char * fname_inp, const whisper_params & /*params*/, float t_sec) {
+bool output_wts(struct whisper_context * ctx, const char * fname, const char * fname_inp, const whisper_params & params, float t_sec) {
     std::ofstream fout(fname);
 
     fprintf(stderr, "%s: saving output to '%s'\n", __func__, fname);
 
-    // TODO: become parameter
-    static const char * font = "/System/Library/Fonts/Supplemental/Courier New Bold.ttf";
+    static const char * font = params.font_path.c_str();
+
+    std::ifstream fin(font);
+    if (!fin.is_open()) {
+        fprintf(stderr, "%s: font not found at '%s', please specify a monospace font with -fp\n", __func__, font);
+        return false;
+    }
 
     fout << "#!/bin/bash" << "\n";
     fout << "\n";

@@ -607,7 +616,7 @@ int main(int argc, char ** argv) {
     {
         static bool is_aborted = false; // NOTE: this should be atomic to avoid data race
 
-        wparams.encoder_begin_callback = [](struct whisper_context * /*ctx*/, void * user_data) {
+        wparams.encoder_begin_callback = [](struct whisper_context * /*ctx*/, struct whisper_state * /*state*/, void * user_data) {
            bool is_aborted = *(bool*)user_data;
            return !is_aborted;
        };

@@ -288,7 +288,6 @@ int main(int argc, char ** argv) {
     wparams.print_realtime = false;
     wparams.print_timestamps = !params.no_timestamps;
     wparams.translate = params.translate;
-    wparams.no_context = true;
     wparams.single_segment = !use_vad;
     wparams.max_tokens = params.max_tokens;
     wparams.language = params.language.c_str();

@@ -9,4 +9,4 @@ To use:
 5. Select the "release" active build variant, and use Android Studio to run and deploy to your device.
 [^1]: I recommend the tiny or base models for running on an Android device.
 
-<img width="300" alt="image" src="https://user-images.githubusercontent.com/1991296/208154256-82d972dc-221b-48c4-bfcb-36ce68602f93.png">
+<img width="300" alt="image" src="https://user-images.githubusercontent.com/1670775/221613663-a17bf770-27ef-45ab-9a46-a5f99ba65d2a.jpg">

@@ -2,6 +2,7 @@ package com.whispercppdemo.ui.main
 
 import androidx.compose.foundation.layout.*
 import androidx.compose.foundation.rememberScrollState
+import androidx.compose.foundation.text.selection.SelectionContainer
 import androidx.compose.foundation.verticalScroll
 import androidx.compose.material3.*
 import androidx.compose.runtime.Composable

@@ -19,6 +20,7 @@ fun MainScreen(viewModel: MainScreenViewModel) {
         canTranscribe = viewModel.canTranscribe,
         isRecording = viewModel.isRecording,
         messageLog = viewModel.dataLog,
+        onBenchmarkTapped = viewModel::benchmark,
         onTranscribeSampleTapped = viewModel::transcribeSample,
         onRecordTapped = viewModel::toggleRecord
     )

@@ -30,6 +32,7 @@ private fun MainScreen(
     canTranscribe: Boolean,
     isRecording: Boolean,
     messageLog: String,
+    onBenchmarkTapped: () -> Unit,
     onTranscribeSampleTapped: () -> Unit,
     onRecordTapped: () -> Unit
 ) {

@@ -45,8 +48,11 @@ private fun MainScreen(
             .padding(innerPadding)
             .padding(16.dp)
     ) {
-        Row(horizontalArrangement = Arrangement.SpaceBetween) {
+        Column(verticalArrangement = Arrangement.SpaceBetween) {
+            Row(horizontalArrangement = Arrangement.SpaceBetween, modifier = Modifier.fillMaxWidth()) {
+                BenchmarkButton(enabled = canTranscribe, onClick = onBenchmarkTapped)
                 TranscribeSampleButton(enabled = canTranscribe, onClick = onTranscribeSampleTapped)
+            }
             RecordButton(
                 enabled = canTranscribe,
                 isRecording = isRecording,

@@ -60,7 +66,16 @@ private fun MainScreen(
 
 @Composable
 private fun MessageLog(log: String) {
+    SelectionContainer() {
         Text(modifier = Modifier.verticalScroll(rememberScrollState()), text = log)
+    }
+}
+
+@Composable
+private fun BenchmarkButton(enabled: Boolean, onClick: () -> Unit) {
+    Button(onClick = onClick, enabled = enabled) {
+        Text("Benchmark")
+    }
 }
 
 @Composable

@@ -41,10 +41,15 @@ class MainScreenViewModel(private val application: Application) : ViewModel() {
 
     init {
         viewModelScope.launch {
+            printSystemInfo()
             loadData()
         }
     }
 
+    private suspend fun printSystemInfo() {
+        printMessage(String.format("System Info: %s\n", WhisperContext.getSystemInfo()));
+    }
+
     private suspend fun loadData() {
         printMessage("Loading data...\n")
         try {

@@ -81,10 +86,29 @@ class MainScreenViewModel(private val application: Application) : ViewModel() {
         //whisperContext = WhisperContext.createContextFromFile(firstModel.absolutePath)
     }
 
+    fun benchmark() = viewModelScope.launch {
+        runBenchmark(6)
+    }
+
     fun transcribeSample() = viewModelScope.launch {
         transcribeAudio(getFirstSample())
     }
 
+    private suspend fun runBenchmark(nthreads: Int) {
+        if (!canTranscribe) {
+            return
+        }
+
+        canTranscribe = false
+
+        printMessage("Running benchmark. This will take minutes...\n")
+        whisperContext?.benchMemory(nthreads)?.let{ printMessage(it) }
+        printMessage("\n")
+        whisperContext?.benchGgmlMulMat(nthreads)?.let{ printMessage(it) }
+
+        canTranscribe = true
+    }
+
     private suspend fun getFirstSample(): File = withContext(Dispatchers.IO) {
         samplesPath.listFiles()!!.first()
     }

@@ -114,11 +138,14 @@ class MainScreenViewModel(private val application: Application) : ViewModel() {
         canTranscribe = false
 
         try {
-            printMessage("Reading wave samples...\n")
+            printMessage("Reading wave samples... ")
             val data = readAudioSamples(file)
+            printMessage("${data.size / (16000 / 1000)} ms\n")
             printMessage("Transcribing data...\n")
+            val start = System.currentTimeMillis()
             val text = whisperContext?.transcribeData(data)
-            printMessage("Done: $text\n")
+            val elapsed = System.currentTimeMillis() - start
+            printMessage("Done ($elapsed ms): $text\n")
         } catch (e: Exception) {
             Log.w(LOG_TAG, e)
             printMessage("${e.localizedMessage}\n")
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
suspend fun benchMemory(nthreads: Int): String = withContext(scope.coroutineContext) {
|
||||||
|
return@withContext WhisperLib.benchMemcpy(nthreads)
|
||||||
|
}
|
||||||
|
|
||||||
|
suspend fun benchGgmlMulMat(nthreads: Int): String = withContext(scope.coroutineContext) {
|
||||||
|
return@withContext WhisperLib.benchGgmlMulMat(nthreads)
|
||||||
|
}
|
||||||
|
|
||||||
suspend fun release() = withContext(scope.coroutineContext) {
|
suspend fun release() = withContext(scope.coroutineContext) {
|
||||||
if (ptr != 0L) {
|
if (ptr != 0L) {
|
||||||
WhisperLib.freeContext(ptr)
|
WhisperLib.freeContext(ptr)
|
||||||
@ -66,6 +74,10 @@ class WhisperContext private constructor(private var ptr: Long) {
|
|||||||
}
|
}
|
||||||
return WhisperContext(ptr)
|
return WhisperContext(ptr)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fun getSystemInfo(): String {
|
||||||
|
return WhisperLib.getSystemInfo()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -74,6 +86,7 @@ private class WhisperLib {
|
|||||||
init {
|
init {
|
||||||
Log.d(LOG_TAG, "Primary ABI: ${Build.SUPPORTED_ABIS[0]}")
|
Log.d(LOG_TAG, "Primary ABI: ${Build.SUPPORTED_ABIS[0]}")
|
||||||
var loadVfpv4 = false
|
var loadVfpv4 = false
|
||||||
|
var loadV8fp16 = false
|
||||||
if (isArmEabiV7a()) {
|
if (isArmEabiV7a()) {
|
||||||
// armeabi-v7a needs runtime detection support
|
// armeabi-v7a needs runtime detection support
|
||||||
val cpuInfo = cpuInfo()
|
val cpuInfo = cpuInfo()
|
||||||
@ -84,11 +97,24 @@ private class WhisperLib {
|
|||||||
loadVfpv4 = true
|
loadVfpv4 = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
} else if (isArmEabiV8a()) {
|
||||||
|
// ARMv8.2a needs runtime detection support
|
||||||
|
val cpuInfo = cpuInfo()
|
||||||
|
cpuInfo?.let {
|
||||||
|
Log.d(LOG_TAG, "CPU info: $cpuInfo")
|
||||||
|
if (cpuInfo.contains("fphp")) {
|
||||||
|
Log.d(LOG_TAG, "CPU supports fp16 arithmetic")
|
||||||
|
loadV8fp16 = true
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (loadVfpv4) {
|
if (loadVfpv4) {
|
||||||
Log.d(LOG_TAG, "Loading libwhisper_vfpv4.so")
|
Log.d(LOG_TAG, "Loading libwhisper_vfpv4.so")
|
||||||
System.loadLibrary("whisper_vfpv4")
|
System.loadLibrary("whisper_vfpv4")
|
||||||
|
} else if (loadV8fp16) {
|
||||||
|
Log.d(LOG_TAG, "Loading libwhisper_v8fp16_va.so")
|
||||||
|
System.loadLibrary("whisper_v8fp16_va")
|
||||||
} else {
|
} else {
|
||||||
Log.d(LOG_TAG, "Loading libwhisper.so")
|
Log.d(LOG_TAG, "Loading libwhisper.so")
|
||||||
System.loadLibrary("whisper")
|
System.loadLibrary("whisper")
|
||||||
@ -103,6 +129,9 @@ private class WhisperLib {
|
|||||||
external fun fullTranscribe(contextPtr: Long, audioData: FloatArray)
|
external fun fullTranscribe(contextPtr: Long, audioData: FloatArray)
|
||||||
external fun getTextSegmentCount(contextPtr: Long): Int
|
external fun getTextSegmentCount(contextPtr: Long): Int
|
||||||
external fun getTextSegment(contextPtr: Long, index: Int): String
|
external fun getTextSegment(contextPtr: Long, index: Int): String
|
||||||
|
external fun getSystemInfo(): String
|
||||||
|
external fun benchMemcpy(nthread: Int): String
|
||||||
|
external fun benchGgmlMulMat(nthread: Int): String
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -110,6 +139,10 @@ private fun isArmEabiV7a(): Boolean {
|
|||||||
return Build.SUPPORTED_ABIS[0].equals("armeabi-v7a")
|
return Build.SUPPORTED_ABIS[0].equals("armeabi-v7a")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private fun isArmEabiV8a(): Boolean {
|
||||||
|
return Build.SUPPORTED_ABIS[0].equals("arm64-v8a")
|
||||||
|
}
|
||||||
|
|
||||||
private fun cpuInfo(): String? {
|
private fun cpuInfo(): String? {
|
||||||
return try {
|
return try {
|
||||||
File("/proc/cpuinfo").inputStream().bufferedReader().use {
|
File("/proc/cpuinfo").inputStream().bufferedReader().use {
|
||||||
|
@ -13,3 +13,14 @@ ifeq ($(TARGET_ARCH_ABI),armeabi-v7a)
|
|||||||
LOCAL_CFLAGS += -mfpu=neon-vfpv4
|
LOCAL_CFLAGS += -mfpu=neon-vfpv4
|
||||||
include $(BUILD_SHARED_LIBRARY)
|
include $(BUILD_SHARED_LIBRARY)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
ifeq ($(TARGET_ARCH_ABI),arm64-v8a)
|
||||||
|
include $(CLEAR_VARS)
|
||||||
|
LOCAL_MODULE := libwhisper_v8fp16_va
|
||||||
|
include $(LOCAL_PATH)/Whisper.mk
|
||||||
|
# Allow building NEON FMA code.
|
||||||
|
# https://android.googlesource.com/platform/ndk/+/master/sources/android/cpufeatures/cpu-features.h
|
||||||
|
LOCAL_CFLAGS += -march=armv8.2-a+fp16
|
||||||
|
include $(BUILD_SHARED_LIBRARY)
|
||||||
|
endif
|
||||||
|
|
||||||
|

@@ -6,6 +6,7 @@
 #include <sys/sysinfo.h>
 #include <string.h>
 #include "whisper.h"
+#include "ggml.h"
 
 #define UNUSED(x) (void)(x)
 #define TAG "JNI"

@@ -214,3 +215,29 @@ Java_com_whispercppdemo_whisper_WhisperLib_00024Companion_getTextSegment(
     jstring string = (*env)->NewStringUTF(env, text);
     return string;
 }
+
+JNIEXPORT jstring JNICALL
+Java_com_whispercppdemo_whisper_WhisperLib_00024Companion_getSystemInfo(
+        JNIEnv *env, jobject thiz
+) {
+    UNUSED(thiz);
+    const char *sysinfo = whisper_print_system_info();
+    jstring string = (*env)->NewStringUTF(env, sysinfo);
+    return string;
+}
+
+JNIEXPORT jstring JNICALL
+Java_com_whispercppdemo_whisper_WhisperLib_00024Companion_benchMemcpy(JNIEnv *env, jobject thiz,
+        jint n_threads) {
+    UNUSED(thiz);
+    const char *bench_ggml_memcpy = whisper_bench_memcpy_str(n_threads);
+    jstring string = (*env)->NewStringUTF(env, bench_ggml_memcpy);
+}
+
+JNIEXPORT jstring JNICALL
+Java_com_whispercppdemo_whisper_WhisperLib_00024Companion_benchGgmlMulMat(JNIEnv *env, jobject thiz,
+        jint n_threads) {
+    UNUSED(thiz);
+    const char *bench_ggml_mul_mat = whisper_bench_ggml_mul_mat_str(n_threads);
+    jstring string = (*env)->NewStringUTF(env, bench_ggml_mul_mat);
+}
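
The three JNI wrappers above are thin: they only forward to whisper_print_system_info() and to the new whisper_bench_memcpy_str() / whisper_bench_ggml_mul_mat_str() helpers declared in the whisper.h diff further down, so the same strings can also be printed from any plain C program. A minimal sketch (not part of the diff), assuming the library is linked in:

```c
// Sketch: print the same diagnostics the Android demo shows in its message log.
#include <stdio.h>
#include "whisper.h"

int main(void) {
    const int n_threads = 4; // illustrative thread count

    printf("system info: %s\n", whisper_print_system_info());
    printf("%s\n", whisper_bench_memcpy_str(n_threads));       // memcpy bandwidth report
    printf("%s\n", whisper_bench_ggml_mul_mat_str(n_threads)); // ggml mul_mat throughput report

    return 0;
}
```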

extra/bench-wts.sh: new executable file, 70 lines

# Benchmark word-level timestamps for different models
#
# This script takes two arguments
# - an audio file
# - [optional] path to a font file

# I'm using "/usr/share/fonts/truetype/freefont/FreeMono.ttf" on Ubuntu

if [ -z "$1" ]; then
  echo "Usage: $0 <audio file> [font file]"
  exit 1
fi

#TODO: Make this a command line parameter
#models="base small large"
#models="tiny.en tiny base.en base small.en small medium.en medium large-v1 large"
models="tiny.en base.en small.en medium.en large"

DURATION=$(ffprobe -i $1 -show_entries format=duration -v quiet -of csv="p=0")
DURATION=$(printf "%.2f" $DURATION)
echo "Input file duration: ${DURATION}s"

for model in $models; do
  echo "Running $model"
  COMMAND="./main -m models/ggml-$model.bin -owts -f $1 -of $1.$model"

  if [ ! -z "$2" ]; then
    COMMAND="$COMMAND -fp $2"
  fi
  #TODO: Surface errors better
  # TIMEFMT is for zsh, TIMEFORMAT is for bash
  EXECTIME=$({ TIMEFMT="%E";TIMEFORMAT=%E; time $COMMAND >/dev/null 2>&1; } 2>&1)

  # Slightly different formats between zsh and bash
  if [ "${EXECTIME: -1}" == "s" ]; then
    EXECTIME=${EXECTIME::-1}
  fi

  RATIO=$(echo "$DURATION / $EXECTIME" | bc -l)
  RATIO=$(printf "%.2f" $RATIO)

  echo "Execution time: ${EXECTIME}s (${RATIO}x realtime)"

  # If the file already exists, delete it
  if [ -f $1.mp4 ]; then
    rm $1.mp4
  fi

  bash $1.$model.wts >/dev/null 2>&1
  mv $1.mp4 $1.$model.mp4

  ffmpeg -y -f lavfi -i color=c=black:s=1200x50:d=$DURATION -vf "drawtext=fontfile=$2:fontsize=36:x=10:y=(h-text_h)/2:text='ggml-$model - ${EXECTIME}s (${RATIO}x realtime)':fontcolor=lightgrey" $1.$model.info.mp4 >/dev/null 2>&1
done

COMMAND="ffmpeg -y"
for model in $models; do
  COMMAND="$COMMAND -i $1.$model.info.mp4 -i $1.$model.mp4"
done
COMMAND="$COMMAND -filter_complex \""
COUNT=0
for model in $models; do
  COMMAND="$COMMAND[${COUNT}:v][$(($COUNT+1)):v]"
  COUNT=$((COUNT+2))
done
COMMAND="$COMMAND vstack=inputs=${COUNT}[v]\" -map \"[v]\" -map 1:a $1.all.mp4 >/dev/null 2>&1"

echo $COMMAND

# Run the command
eval $COMMAND

whisper.cpp: 913 changes (file diff suppressed because it is too large)

whisper.h: 160 changes

@@ -66,6 +66,7 @@ extern "C" {
     //
 
     struct whisper_context;
+    struct whisper_state;
 
     typedef int whisper_token;
 

@@ -101,11 +102,20 @@ extern "C" {
     WHISPER_API struct whisper_context * whisper_init_from_buffer(void * buffer, size_t buffer_size);
     WHISPER_API struct whisper_context * whisper_init(struct whisper_model_loader * loader);
 
-    // Frees all memory allocated by the model.
-    WHISPER_API void whisper_free(struct whisper_context * ctx);
+    // These are the same as the above, but the internal state of the context is not allocated automatically
+    // It is the responsibility of the caller to allocate the state using whisper_init_state() (#523)
+    WHISPER_API struct whisper_context * whisper_init_from_file_no_state(const char * path_model);
+    WHISPER_API struct whisper_context * whisper_init_from_buffer_no_state(void * buffer, size_t buffer_size);
+    WHISPER_API struct whisper_context * whisper_init_no_state(struct whisper_model_loader * loader);
+
+    WHISPER_API struct whisper_state * whisper_init_state(struct whisper_context * ctx);
+
+    // Frees all allocated memory
+    WHISPER_API void whisper_free (struct whisper_context * ctx);
+    WHISPER_API void whisper_free_state(struct whisper_state * state);
 
     // Convert RAW PCM audio to log mel spectrogram.
-    // The resulting spectrogram is stored inside the provided whisper context.
+    // The resulting spectrogram is stored inside the default state of the provided whisper context.
     // Returns 0 on success
     WHISPER_API int whisper_pcm_to_mel(
             struct whisper_context * ctx,

@@ -113,17 +123,30 @@ extern "C" {
             int n_samples,
             int n_threads);
 
-    // Convert RAW PCM audio to log mel spectrogram but applies a Phase Vocoder to speed up the audio x2.
-    // The resulting spectrogram is stored inside the provided whisper context.
-    // Returns 0 on success
-    WHISPER_API int whisper_pcm_to_mel_phase_vocoder(
-        struct whisper_context* ctx,
-        const float* samples,
+    WHISPER_API int whisper_pcm_to_mel_with_state(
+            struct whisper_context * ctx,
+            struct whisper_state * state,
+            const float * samples,
             int n_samples,
             int n_threads);
 
+    // Convert RAW PCM audio to log mel spectrogram but applies a Phase Vocoder to speed up the audio x2.
+    // The resulting spectrogram is stored inside the default state of the provided whisper context.
+    // Returns 0 on success
+    WHISPER_API int whisper_pcm_to_mel_phase_vocoder(
+        struct whisper_context * ctx,
+        const float * samples,
+        int n_samples,
+        int n_threads);
+
-    // This can be used to set a custom log mel spectrogram inside the provided whisper context.
+    WHISPER_API int whisper_pcm_to_mel_phase_vocoder_with_state(
+        struct whisper_context * ctx,
+        struct whisper_state * state,
+        const float * samples,
+        int n_samples,
+        int n_threads);
+
+    // This can be used to set a custom log mel spectrogram inside the default state of the provided whisper context.
     // Use this instead of whisper_pcm_to_mel() if you want to provide your own log mel spectrogram.
     // n_mel must be 80
     // Returns 0 on success

@@ -133,7 +156,14 @@ extern "C" {
             int n_len,
             int n_mel);
 
-    // Run the Whisper encoder on the log mel spectrogram stored inside the provided whisper context.
+    WHISPER_API int whisper_set_mel_with_state(
+            struct whisper_context * ctx,
+            struct whisper_state * state,
+            const float * data,
+            int n_len,
+            int n_mel);
+
+    // Run the Whisper encoder on the log mel spectrogram stored inside the default state in the provided whisper context.
     // Make sure to call whisper_pcm_to_mel() or whisper_set_mel() first.
     // offset can be used to specify the offset of the first frame in the spectrogram.
     // Returns 0 on success

@@ -142,6 +172,12 @@ extern "C" {
             int offset,
             int n_threads);
 
+    WHISPER_API int whisper_encode_with_state(
+            struct whisper_context * ctx,
+            struct whisper_state * state,
+            int offset,
+            int n_threads);
+
     // Run the Whisper decoder to obtain the logits and probabilities for the next token.
     // Make sure to call whisper_encode() first.
     // tokens + n_tokens is the provided context for the decoder.

@@ -155,6 +191,14 @@ extern "C" {
             int n_past,
             int n_threads);
 
+    WHISPER_API int whisper_decode_with_state(
+            struct whisper_context * ctx,
+            struct whisper_state * state,
+            const whisper_token * tokens,
+            int n_tokens,
+            int n_past,
+            int n_threads);
+
     // Convert the provided text into tokens.
     // The tokens pointer must be large enough to hold the resulting tokens.
     // Returns the number of tokens on success, no more than n_max_tokens

@@ -190,17 +234,26 @@ extern "C" {
             int n_threads,
             float * lang_probs);
 
+    WHISPER_API int whisper_lang_auto_detect_with_state(
+            struct whisper_context * ctx,
+            struct whisper_state * state,
+            int offset_ms,
+            int n_threads,
+            float * lang_probs);
+
     WHISPER_API int whisper_n_len (struct whisper_context * ctx); // mel length
+    WHISPER_API int whisper_n_len_from_state(struct whisper_state * state); // mel length
     WHISPER_API int whisper_n_vocab (struct whisper_context * ctx);
     WHISPER_API int whisper_n_text_ctx (struct whisper_context * ctx);
     WHISPER_API int whisper_n_audio_ctx (struct whisper_context * ctx);
-    WHISPER_API int whisper_is_multilingual(struct whisper_context * ctx);
+    WHISPER_API int whisper_is_multilingual (struct whisper_context * ctx);
 
     // Token logits obtained from the last call to whisper_decode()
     // The logits for the last token are stored in the last row
     // Rows: n_tokens
     // Cols: n_vocab
-    WHISPER_API float * whisper_get_logits(struct whisper_context * ctx);
+    WHISPER_API float * whisper_get_logits (struct whisper_context * ctx);
+    WHISPER_API float * whisper_get_logits_from_state(struct whisper_state * state);
 
     // Token Id -> String. Uses the vocabulary in the provided context
     WHISPER_API const char * whisper_token_to_str(struct whisper_context * ctx, whisper_token token);

@@ -218,7 +271,7 @@ extern "C" {
     WHISPER_API whisper_token whisper_token_translate (void);
     WHISPER_API whisper_token whisper_token_transcribe(void);
 
-    // Performance information
+    // Performance information from the default state.
     WHISPER_API void whisper_print_timings(struct whisper_context * ctx);
     WHISPER_API void whisper_reset_timings(struct whisper_context * ctx);
 

@@ -236,12 +289,23 @@ extern "C" {
     // Text segment callback
     // Called on every newly generated text segment
    // Use the whisper_full_...() functions to obtain the text segments
-    typedef void (*whisper_new_segment_callback)(struct whisper_context * ctx, int n_new, void * user_data);
+    typedef void (*whisper_new_segment_callback)(struct whisper_context * ctx, struct whisper_state * state, int n_new, void * user_data);
 
     // Encoder begin callback
     // If not NULL, called before the encoder starts
     // If it returns false, the computation is aborted
-    typedef bool (*whisper_encoder_begin_callback)(struct whisper_context * ctx, void * user_data);
+    typedef bool (*whisper_encoder_begin_callback)(struct whisper_context * ctx, struct whisper_state * state, void * user_data);
+
+    // Logits filter callback
+    // Can be used to modify the logits before sampling
+    // If not NULL, called after applying temperature to logits
+    typedef void (*whisper_logits_filter_callback)(
+            struct whisper_context * ctx,
+            struct whisper_state * state,
+            const whisper_token_data * tokens,
+            int n_tokens,
+            float * logits,
+            void * user_data);
 
     // Parameters for the whisper_full() function
     // If you chnage the order or add new parameters, make sure to update the default values in whisper.cpp:
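
To make the new hook concrete, here is an illustrative logits filter that suppresses a single token id before sampling. Only the whisper_logits_filter_callback typedef above and the logits_filter_callback / logits_filter_callback_user_data fields added to whisper_full_params in the next hunk come from this diff; the callback name, the banned token id, and the wiring are assumptions.

```c
// Illustrative sketch, not part of the diff: zero out one token id before sampling.
#include <math.h>
#include "whisper.h"

static void suppress_one_token(
        struct whisper_context * ctx,
        struct whisper_state * state,
        const whisper_token_data * tokens,
        int n_tokens,
        float * logits,              // one logit per vocabulary entry
        void * user_data) {
    (void) ctx; (void) state; (void) tokens; (void) n_tokens;
    const whisper_token banned = *(const whisper_token *) user_data; // assumed valid token id
    logits[banned] = -INFINITY;      // sampling will never pick this token
}

// Hypothetical wiring:
//     struct whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
//     whisper_token banned = 0; // placeholder token id
//     wparams.logits_filter_callback           = suppress_one_token;
//     wparams.logits_filter_callback_user_data = &banned;
```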
@@ -315,11 +379,16 @@ extern "C" {
         // called each time before the encoder starts
         whisper_encoder_begin_callback encoder_begin_callback;
         void * encoder_begin_callback_user_data;
+
+        // called by each decoder to filter obtained logits
+        whisper_logits_filter_callback logits_filter_callback;
+        void * logits_filter_callback_user_data;
     };
 
     WHISPER_API struct whisper_full_params whisper_full_default_params(enum whisper_sampling_strategy strategy);
 
     // Run the entire model: PCM -> log mel spectrogram -> encoder -> decoder -> text
+    // Not thread safe for same context
     // Uses the specified decoding strategy to obtain the text.
     WHISPER_API int whisper_full(
             struct whisper_context * ctx,

@@ -327,7 +396,16 @@ extern "C" {
             const float * samples,
             int n_samples);
 
-    // Split the input audio in chunks and process each chunk separately using whisper_full()
+    WHISPER_API int whisper_full_with_state(
+            struct whisper_context * ctx,
+            struct whisper_state * state,
+            struct whisper_full_params params,
+            const float * samples,
+            int n_samples);
+
+    // Split the input audio in chunks and process each chunk separately using whisper_full_with_state()
+    // Result is stored in the default state of the context
+    // Not thread safe if executed in parallel on the same context.
     // It seems this approach can offer some speedup in some cases.
     // However, the transcription accuracy can be worse at the beginning and end of each chunk.
     WHISPER_API int whisper_full_parallel(

@@ -337,40 +415,56 @@ extern "C" {
             int n_samples,
             int n_processors);
 
-    // Number of generated text segments.
+    // Number of generated text segments
     // A segment can be a few words, a sentence, or even a paragraph.
-    WHISPER_API int whisper_full_n_segments(struct whisper_context * ctx);
+    WHISPER_API int whisper_full_n_segments (struct whisper_context * ctx);
+    WHISPER_API int whisper_full_n_segments_from_state(struct whisper_state * state);
 
-    // Language id associated with the current context
+    // Language id associated with the context's default state
     WHISPER_API int whisper_full_lang_id(struct whisper_context * ctx);
 
-    // Get the start and end time of the specified segment.
-    WHISPER_API int64_t whisper_full_get_segment_t0(struct whisper_context * ctx, int i_segment);
-    WHISPER_API int64_t whisper_full_get_segment_t1(struct whisper_context * ctx, int i_segment);
-
-    // Get the text of the specified segment.
-    WHISPER_API const char * whisper_full_get_segment_text(struct whisper_context * ctx, int i_segment);
-
-    // Get number of tokens in the specified segment.
-    WHISPER_API int whisper_full_n_tokens(struct whisper_context * ctx, int i_segment);
-
-    // Get the token text of the specified token in the specified segment.
-    WHISPER_API const char * whisper_full_get_token_text(struct whisper_context * ctx, int i_segment, int i_token);
+    // Language id associated with the provided state
+    WHISPER_API int whisper_full_lang_id_from_state(struct whisper_state * state);
+
+    // Get the start and end time of the specified segment
+    WHISPER_API int64_t whisper_full_get_segment_t0 (struct whisper_context * ctx, int i_segment);
+    WHISPER_API int64_t whisper_full_get_segment_t0_from_state(struct whisper_state * state, int i_segment);
+
+    WHISPER_API int64_t whisper_full_get_segment_t1 (struct whisper_context * ctx, int i_segment);
+    WHISPER_API int64_t whisper_full_get_segment_t1_from_state(struct whisper_state * state, int i_segment);
+
+    // Get the text of the specified segment
+    WHISPER_API const char * whisper_full_get_segment_text (struct whisper_context * ctx, int i_segment);
+    WHISPER_API const char * whisper_full_get_segment_text_from_state(struct whisper_state * state, int i_segment);
+
+    // Get number of tokens in the specified segment
+    WHISPER_API int whisper_full_n_tokens (struct whisper_context * ctx, int i_segment);
+    WHISPER_API int whisper_full_n_tokens_from_state(struct whisper_state * state, int i_segment);
+
+    // Get the token text of the specified token in the specified segment
+    WHISPER_API const char * whisper_full_get_token_text (struct whisper_context * ctx, int i_segment, int i_token);
+    WHISPER_API const char * whisper_full_get_token_text_from_state(struct whisper_context * ctx, struct whisper_state * state, int i_segment, int i_token);
+
     WHISPER_API whisper_token whisper_full_get_token_id (struct whisper_context * ctx, int i_segment, int i_token);
+    WHISPER_API whisper_token whisper_full_get_token_id_from_state(struct whisper_state * state, int i_segment, int i_token);
 
-    // Get token data for the specified token in the specified segment.
+    // Get token data for the specified token in the specified segment
     // This contains probabilities, timestamps, etc.
-    WHISPER_API whisper_token_data whisper_full_get_token_data(struct whisper_context * ctx, int i_segment, int i_token);
+    WHISPER_API whisper_token_data whisper_full_get_token_data (struct whisper_context * ctx, int i_segment, int i_token);
+    WHISPER_API whisper_token_data whisper_full_get_token_data_from_state(struct whisper_state * state, int i_segment, int i_token);
 
-    // Get the probability of the specified token in the specified segment.
-    WHISPER_API float whisper_full_get_token_p(struct whisper_context * ctx, int i_segment, int i_token);
+    // Get the probability of the specified token in the specified segment
+    WHISPER_API float whisper_full_get_token_p (struct whisper_context * ctx, int i_segment, int i_token);
+    WHISPER_API float whisper_full_get_token_p_from_state(struct whisper_state * state, int i_segment, int i_token);
 
     ////////////////////////////////////////////////////////////////////////////
 
     // Temporary helpers needed for exposing ggml interface
 
     WHISPER_API int whisper_bench_memcpy(int n_threads);
+    WHISPER_API const char * whisper_bench_memcpy_str(int n_threads);
     WHISPER_API int whisper_bench_ggml_mul_mat(int n_threads);
+    WHISPER_API const char * whisper_bench_ggml_mul_mat_str(int n_threads);
 
 #ifdef __cplusplus
 }
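
The context/state split above is the core of this change: whisper_context now holds the model, while a whisper_state carries the per-transcription data, and the *_no_state loaders plus whisper_init_state() let callers own that state explicitly. A minimal usage sketch, assuming a model path and 16 kHz mono float samples are supplied by the caller (error handling trimmed; the file name and function are placeholders):

```c
// Sketch of the new explicit-state flow declared in the whisper.h diff above.
#include <stdio.h>
#include "whisper.h"

int transcribe_with_own_state(const float * pcm, int n_samples) {
    // load the weights once, without allocating the default internal state
    struct whisper_context * ctx = whisper_init_from_file_no_state("model.bin");
    if (ctx == NULL) return 1;

    // allocate a state explicitly; multiple states could share the same context (#523)
    struct whisper_state * state = whisper_init_state(ctx);
    if (state == NULL) { whisper_free(ctx); return 1; }

    struct whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);

    // run the full pipeline into the caller-owned state
    if (whisper_full_with_state(ctx, state, wparams, pcm, n_samples) == 0) {
        const int n = whisper_full_n_segments_from_state(state);
        for (int i = 0; i < n; ++i) {
            printf("[%lld -> %lld] %s\n",
                   (long long) whisper_full_get_segment_t0_from_state(state, i),
                   (long long) whisper_full_get_segment_t1_from_state(state, i),
                   whisper_full_get_segment_text_from_state(state, i));
        }
    }

    whisper_free_state(state);
    whisper_free(ctx);
    return 0;
}
```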