Compare commits

...

7 Commits

SHA1 Message Date
fa9621e5e9 mtl : update Makefile to support Metal 2022-11-12 08:32:03 +02:00
b5d3521626 mtl : matrix multiplication support
Seems to be only marginally faster compared to pure AMX
2022-11-09 18:29:38 +02:00
4e5674a5d5 sync : submodule whisper.spm 2022-11-07 21:48:13 +02:00
4c66b6a828 cmake : add submodule whisper.spm 2022-11-07 20:50:24 +02:00
c30bffc8a5 ref #22 : add "duration" option
Can be used to partially process a recording
2022-11-07 20:14:52 +02:00
8fdfb0ba92 Update README.md 2022-11-06 21:04:21 +02:00
c71363f14c examples : add simple script for generating Karaoke video 2022-11-06 09:22:50 +02:00
13 changed files with 444 additions and 16 deletions

3
.gitmodules vendored Normal file

@ -0,0 +1,3 @@
[submodule "bindings/ios"]
path = bindings/ios
url = https://github.com/ggerganov/whisper.spm

CMakeLists.txt

@ -9,6 +9,11 @@ if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
set(WHISPER_STANDALONE ON)
include(cmake/GitVars.cmake)
include(cmake/BuildTypes.cmake)
# configure project version
if (EXISTS "${CMAKE_SOURCE_DIR}/bindings/ios/Makefile-tmpl")
configure_file(${CMAKE_SOURCE_DIR}/bindings/ios/Makefile-tmpl ${CMAKE_SOURCE_DIR}/bindings/ios/Makefile @ONLY)
endif()
else()
set(WHISPER_STANDALONE OFF)
endif()
@ -89,6 +94,17 @@ if (APPLE AND NOT WHISPER_NO_ACCELERATE)
else()
message(WARNING "Accelerate framework not found")
endif()
find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
find_library(METAL_FRAMEWORK Metal REQUIRED)
find_library(METALKIT_FRAMEWORK MetalKit REQUIRED)
find_library(METALPERFORMANCE_FRAMEWORK MetalPerformanceShaders REQUIRED)
set(WHISPER_EXTRA_LIBS ${WHISPER_EXTRA_LIBS}
${FOUNDATION_LIBRARY}
${METAL_FRAMEWORK}
${METALKIT_FRAMEWORK}
${METALPERFORMANCE_FRAMEWORK})
endif()
if (WHISPER_SUPPORT_OPENBLAS)
@ -163,6 +179,7 @@ set(TARGET whisper)
add_library(${TARGET}
ggml.c
ggml-mtl.m
whisper.cpp
)

Makefile

@ -58,8 +58,8 @@ endif
ifndef WHISPER_NO_ACCELERATE
# Mac M1 - include Accelerate framework
ifeq ($(UNAME_S),Darwin)
- CFLAGS += -DGGML_USE_ACCELERATE
- LDFLAGS += -framework Accelerate
+ CFLAGS += -DGGML_USE_ACCELERATE -DGGML_PERF
+ LDFLAGS += -framework Foundation -framework Accelerate -framework Metal -framework MetalKit -framework MetalPerformanceShaders
endif
endif
ifneq ($(filter aarch64%,$(UNAME_M)),)
@ -81,18 +81,21 @@ endif
# Build library + main
#
- main: examples/main/main.cpp ggml.o whisper.o
- $(CXX) $(CXXFLAGS) examples/main/main.cpp whisper.o ggml.o -o main $(LDFLAGS)
+ main: examples/main/main.cpp ggml.o ggml-mtl.o whisper.o
+ $(CXX) $(CXXFLAGS) examples/main/main.cpp whisper.o ggml.o ggml-mtl.o -o main $(LDFLAGS)
./main -h
ggml.o: ggml.c ggml.h
$(CC) $(CFLAGS) -c ggml.c -o ggml.o
ggml-mtl.o: ggml-mtl.m ggml-mtl.h
$(CC) $(CFLAGS) -c ggml-mtl.m -o ggml-mtl.o
whisper.o: whisper.cpp whisper.h
$(CXX) $(CXXFLAGS) -c whisper.cpp -o whisper.o
- libwhisper.a: ggml.o whisper.o
- $(AR) rcs libwhisper.a ggml.o whisper.o
+ libwhisper.a: ggml.o ggml-mtl.o whisper.o
+ $(AR) rcs libwhisper.a ggml.o ggml-mtl.o whisper.o
clean:
rm -f *.o main stream bench libwhisper.a

README.md

@ -437,9 +437,12 @@ For more details, see the conversion script [models/convert-pt-to-ggml.py](model
## Bindings
- [X] Rust: [tazz4843/whisper-rs](https://github.com/tazz4843/whisper-rs)
- [X] Objective-C / Swift: [ggerganov/whisper.spm](https://github.com/ggerganov/whisper.spm)
- [ ] Python:
- [ ] Java:
## Examples
There are various examples of using the library for different projects in the [examples](examples) folder. Check them out!
## [Frequently asked questions (#126)](https://github.com/ggerganov/whisper.cpp/discussions/126)

1
bindings/ios Submodule

Submodule bindings/ios added at 4bda8e9d80

49
examples/generate-karaoke.sh Executable file

@ -0,0 +1,49 @@
#!/bin/bash
executable="./main"
model="base.en"
model_path="models/ggml-$model.bin"
# requires sox and ffmpeg to be installed
if ! command -v sox &> /dev/null
then
echo "sox could not be found"
exit 1
fi
if ! command -v ffmpeg &> /dev/null
then
echo "ffmpeg could not be found"
exit 2
fi
if [ ! -f "$executable" ]; then
echo "'$executable' does not exist. Please build it first."
exit 3
fi
if [ ! -f "$model_path" ]; then
echo "'$model_path' does not exist. Please download it first."
exit 4
fi
# record some raw audio
sox -d rec.wav
# resample to 16kHz
ffmpeg -y -i ./rec.wav -ar 16000 -ac 1 -c:a pcm_s16le ./rec16.wav > /dev/null 2>&1
# run Whisper
echo "Processing ..."
"$executable" -m "$model_path" rec16.wav -owts > /dev/null 2>&1
# generate Karaoke video
echo "Generating video ..."
source rec16.wav.wts > /dev/null 2>&1
# play the video
echo "Playing ./rec16.wav.mp4 ..."
ffplay -loglevel 0 -autoexit ./rec16.wav.mp4
echo "Done"
exit 0
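
(Note: the -owts run above writes rec16.wav.wts next to the audio; since this script source-s that file, the generated .wts is itself expected to be a shell script that renders ./rec16.wav.mp4.)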

examples/main/main.cpp

@ -53,6 +53,7 @@ struct whisper_params {
int32_t n_processors = 1;
int32_t offset_t_ms = 0;
int32_t offset_n = 0;
int32_t duration_ms = 0;
int32_t max_context = -1;
int32_t max_len = 0;
@ -95,6 +96,8 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
params.offset_t_ms = std::stoi(argv[++i]);
} else if (arg == "-on" || arg == "--offset-n") {
params.offset_n = std::stoi(argv[++i]);
} else if (arg == "-d" || arg == "--duration") {
params.duration_ms = std::stoi(argv[++i]);
} else if (arg == "-mc" || arg == "--max-context") {
params.max_context = std::stoi(argv[++i]);
} else if (arg == "-ml" || arg == "--max-len") {
@ -154,6 +157,7 @@ void whisper_print_usage(int argc, char ** argv, const whisper_params & params)
fprintf(stderr, " -p N, --processors N number of processors to use during computation (default: %d)\n", params.n_processors);
fprintf(stderr, " -ot N, --offset-t N time offset in milliseconds (default: %d)\n", params.offset_t_ms);
fprintf(stderr, " -on N, --offset-n N segment index offset (default: %d)\n", params.offset_n);
fprintf(stderr, " -d N, --duration N duration of audio to process in milliseconds (default: %d)\n", params.duration_ms);
fprintf(stderr, " -mc N, --max-context N maximum number of text context tokens to store (default: max)\n");
fprintf(stderr, " -ml N, --max-len N maximum segment length in characters (default: %d)\n", params.max_len);
fprintf(stderr, " -wt N, --word-thold N word timestamp probability threshold (default: %f)\n", params.word_thold);
@ -532,6 +536,7 @@ int main(int argc, char ** argv) {
wparams.n_threads = params.n_threads;
wparams.n_max_text_ctx = params.max_context >= 0 ? params.max_context : wparams.n_max_text_ctx;
wparams.offset_ms = params.offset_t_ms;
wparams.duration_ms = params.duration_ms;
wparams.token_timestamps = params.output_wts || params.max_len > 0;
wparams.thold_pt = params.word_thold;
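
The new flag is plumbed straight into whisper_full_params. Internally, both the offset and the duration are converted from milliseconds into 10 ms spectrogram frames when decoding starts (see the seek_start/seek_end computation in the whisper.cpp hunk further down). A worked sketch of that arithmetic, with made-up values:

// Hypothetical example (values and file name made up):
//   ./main -m models/ggml-base.en.bin -ot 60000 -d 30000 audio.wav
const int offset_t_ms = 60000;                       // start at 60 s
const int duration_ms = 30000;                       // process 30 s of audio
const int seek_start  = offset_t_ms/10;              // 6000 frames (1 frame = 10 ms)
const int seek_end    = seek_start + duration_ms/10; // 9000 frames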

38
ggml-mtl.h Normal file

@ -0,0 +1,38 @@
#pragma once
#include <stdint.h>
#include <stddef.h>
// TODO: this will hold dynamic context data in the future
// currently unused
struct ggml_mtl_context {
void * dummy;
};
struct ggml_mtl_object {
int32_t id;
void * data;
};
struct ggml_mtl_context * ggml_mtl_init(void);
struct ggml_mtl_object ggml_mtl_alloc(size_t size);
// multiply matrix by vector
void ggml_mtl_mul_mat_vec_f16(
struct ggml_mtl_context * ctx,
struct ggml_mtl_object src0, // matrix f16
const __fp16 * src1, // vector f16
float * dst, // vector f32
int nrows,
int ncols);
// multiply matrix by matrix
void ggml_mtl_mul_mat_f16(
struct ggml_mtl_context * ctx,
struct ggml_mtl_object src0, // matrix f16
const __fp16 * src1, // matrix f16
float * dst, // matrix f32
int nrows0,
int nrows1,
int ncols);
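
Taken together, the header describes a small allocate-then-call API: weight data is placed into a Metal buffer up front with ggml_mtl_alloc, and the kernels later locate that buffer by the id stored in the returned object. A minimal usage sketch, not part of the commit (dimensions made up; NULL is passed for the unused context, matching the call site in ggml.c below; the buffers are created with MTLResourceStorageModeManaged, so on a discrete GPU the CPU-side writes would also need a didModifyRange: notification, which this sketch glosses over):

#include "ggml-mtl.h"

int main(void) {
    ggml_mtl_init();

    const int nrows = 4, ncols = 8;
    struct ggml_mtl_object w = ggml_mtl_alloc(nrows*ncols*sizeof(__fp16));

    __fp16 * wdata = (__fp16 *) w.data; // CPU-visible pointer into the MTLBuffer
    for (int i = 0; i < nrows*ncols; ++i) wdata[i] = 1.0f;

    __fp16 x[8];
    for (int i = 0; i < 8; ++i) x[i] = 1.0f;

    float y[4];
    ggml_mtl_mul_mat_vec_f16(NULL, w, x, y, nrows, ncols); // expect y[i] == 8.0f
    return 0;
}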

162
ggml-mtl.m Normal file

@ -0,0 +1,162 @@
#import "ggml-mtl.h"
#import <Foundation/Foundation.h>
#import <Metal/Metal.h>
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>
#define GGML_MTL_MAX_BUFFERS 256
// global static storage for Metal buffers
// TODO: move this into a dynamic context
static id<MTLBuffer> g_buffers[GGML_MTL_MAX_BUFFERS];
// global MTL context
// TODO: move this into a dynamic context
static id<MTLDevice> g_device;
static id<MTLCommandQueue> g_command_queue;
struct ggml_mtl_context * ggml_mtl_init() {
// TODO: implement properly
// for now, init the global MTL context and MTL buffers
g_device = MTLCreateSystemDefaultDevice();
g_command_queue = [g_device newCommandQueue];
if (g_command_queue == nil)
{
NSLog(@"Failed to find the command queue.");
return nil;
}
return nil;
}
// search for unallocated buffer slot and use it
struct ggml_mtl_object ggml_mtl_alloc(size_t size) {
// TODO: temporarily making sure that the buffers are nil at the start
static bool first = true;
if (first) {
for (int i = 0; i < GGML_MTL_MAX_BUFFERS; ++i) {
assert(g_buffers[i] == nil);
}
first = false;
}
struct ggml_mtl_object obj = { -1, nil };
for (int i = 0; i < GGML_MTL_MAX_BUFFERS; i++) {
if (g_buffers[i] == nil) {
g_buffers[i] = [g_device newBufferWithLength:size options:MTLResourceStorageModeManaged];
// link the MTL buffer to the ggml object
obj.id = i;
obj.data = [g_buffers[i] contents];
break;
}
}
return obj;
}
struct params_mul_mat_vec {
int N; // rows
int M; // cols
};
// multiply matrix with a vector using MPSMatrixVectorMultiplication
void ggml_mtl_mul_mat_vec_f16(
struct ggml_mtl_context * ctx,
struct ggml_mtl_object src0,
const __fp16 * src1,
float * dst,
int nrows,
int ncols) {
(void) ctx; // unused
// Create a command buffer to hold commands.
id<MTLCommandBuffer> commandBuffer = [g_command_queue commandBuffer];
assert(commandBuffer != nil);
// make managed device buffer to store src1
id<MTLBuffer> src1_buffer = [g_device newBufferWithBytes:src1 length:ncols*sizeof(__fp16) options:MTLResourceStorageModeManaged];
id<MTLBuffer> dst_buffer = [g_device newBufferWithLength:nrows*sizeof(float) options:MTLResourceStorageModeManaged];
// MPSMatrixDescriptor
MPSMatrixDescriptor *src0_desc = [MPSMatrixDescriptor matrixDescriptorWithRows:nrows columns:ncols rowBytes:ncols*sizeof(__fp16) dataType:MPSDataTypeFloat16];
MPSVectorDescriptor *src1_desc = [MPSVectorDescriptor vectorDescriptorWithLength:ncols dataType:MPSDataTypeFloat16];
MPSVectorDescriptor *dst_desc = [MPSVectorDescriptor vectorDescriptorWithLength:nrows dataType:MPSDataTypeFloat32];
// MPSMatrix
MPSMatrix *src0_mat = [[MPSMatrix alloc] initWithBuffer:g_buffers[src0.id] descriptor:src0_desc];
MPSVector *src1_vec = [[MPSVector alloc] initWithBuffer:src1_buffer descriptor:src1_desc];
MPSVector *dst_vec = [[MPSVector alloc] initWithBuffer:dst_buffer descriptor:dst_desc];
// MPSMatrixVectorMultiplication
MPSMatrixVectorMultiplication *mul_mat_vec = [[MPSMatrixVectorMultiplication alloc] initWithDevice:g_device transpose:NO rows:nrows columns:ncols alpha:1.0 beta:0.0];
// encode
[mul_mat_vec encodeToCommandBuffer:commandBuffer
inputMatrix:src0_mat
inputVector:src1_vec
resultVector:dst_vec];
[commandBuffer commit];
[commandBuffer waitUntilCompleted];
// copy GPU result to CPU
memcpy(dst, [dst_buffer contents], nrows*sizeof(float));
}
// multiply matrix with a matrix using MPSMatrixMultiplication
void ggml_mtl_mul_mat_f16(
struct ggml_mtl_context * ctx,
struct ggml_mtl_object src0,
const __fp16 * src1,
float * dst,
int nrows0,
int nrows1,
int ncols) {
(void) ctx; // unused
// Create a command buffer to hold commands.
id<MTLCommandBuffer> commandBuffer = [g_command_queue commandBuffer];
assert(commandBuffer != nil);
// make managed device buffer to store src1
id<MTLBuffer> src1_buffer = [g_device newBufferWithBytes:src1 length:ncols*nrows1*sizeof(__fp16) options:MTLResourceStorageModeManaged];
id<MTLBuffer> dst_buffer = [g_device newBufferWithLength:nrows0*nrows1*sizeof(float) options:MTLResourceStorageModeManaged];
// MPSMatrixDescriptor
MPSMatrixDescriptor *src0_desc = [MPSMatrixDescriptor matrixDescriptorWithRows:nrows0 columns:ncols rowBytes:ncols*sizeof(__fp16) dataType:MPSDataTypeFloat16];
MPSMatrixDescriptor *src1_desc = [MPSMatrixDescriptor matrixDescriptorWithRows:nrows1 columns:ncols rowBytes:ncols*sizeof(__fp16) dataType:MPSDataTypeFloat16];
MPSMatrixDescriptor *dst_desc = [MPSMatrixDescriptor matrixDescriptorWithRows:nrows1 columns:nrows0 rowBytes:nrows0*sizeof(float) dataType:MPSDataTypeFloat32];
// MPSMatrix
MPSMatrix *src0_mat = [[MPSMatrix alloc] initWithBuffer:g_buffers[src0.id] descriptor:src0_desc];
MPSMatrix *src1_mat = [[MPSMatrix alloc] initWithBuffer:src1_buffer descriptor:src1_desc];
MPSMatrix *dst_mat = [[MPSMatrix alloc] initWithBuffer:dst_buffer descriptor:dst_desc];
//// MPSMatrixMultiplication z = x * yT
//MPSMatrixMultiplication *mul_mat = [[MPSMatrixMultiplication alloc] initWithDevice:g_device transposeLeft:NO transposeRight:YES resultRows:nrows resultColumns:nrows interiorColumns:ncols alpha:1.0 beta:0.0];
//// encode
//[mul_mat encodeToCommandBuffer:commandBuffer
// leftMatrix:src0_mat
// rightMatrix:src1_mat
// resultMatrix:dst_mat];
// MPSMatrixMultiplication zT = y * xT
MPSMatrixMultiplication *mul_mat = [[MPSMatrixMultiplication alloc] initWithDevice:g_device transposeLeft:NO transposeRight:YES resultRows:nrows1 resultColumns:nrows0 interiorColumns:ncols alpha:1.0 beta:0.0];
// encode
[mul_mat encodeToCommandBuffer:commandBuffer
leftMatrix:src1_mat
rightMatrix:src0_mat
resultMatrix:dst_mat];
[commandBuffer commit];
[commandBuffer waitUntilCompleted];
// copy GPU result to CPU
memcpy(dst, [dst_buffer contents], nrows0*nrows1*sizeof(float));
}
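
Worth noting: the active encode computes dst = src1 * src0^T rather than src0 * src1^T. Both inputs are stored row-major with ncols as the inner dimension, and ggml expects dst as nrows1 rows of nrows0 floats, so swapping the operands (with transposeRight:YES) produces exactly that layout with no extra transpose of the result. A plain-C reference of the expected output, useful as a sanity check (a sketch only, using float in place of __fp16):

// Reference (CPU) version of what the Metal path above computes.
// dst is (nrows1 x nrows0) row-major = src1 * src0^T = (src0 * src1^T)^T.
static void ggml_mtl_mul_mat_f16_ref(const float * src0, const float * src1, float * dst,
                                     int nrows0, int nrows1, int ncols) {
    for (int r1 = 0; r1 < nrows1; ++r1) {
        for (int r0 = 0; r0 < nrows0; ++r0) {
            float sum = 0.0f;
            for (int k = 0; k < ncols; ++k) {
                sum += src1[r1*ncols + k] * src0[r0*ncols + k]; // rows share the inner dim ncols
            }
            dst[r1*nrows0 + r0] = sum;
        }
    }
}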

138
ggml.c

@ -1,5 +1,7 @@
#include "ggml.h"
#include "ggml-mtl.h"
#if defined(_MSC_VER) || defined(__MINGW32__)
#include <malloc.h> // using malloc.h with MSC/MINGW
#elif !defined(__FreeBSD__)
@ -1307,6 +1309,8 @@ struct ggml_context * ggml_init(struct ggml_init_params params) {
static bool first_time = true;
if (first_time) {
ggml_mtl_init(); // TODO: fix this
for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
g_state.contexts[i].used = false;
}
@ -1462,6 +1466,104 @@ struct ggml_tensor * ggml_new_tensor_impl(
/*.perf_cycles =*/ 0,
/*.perf_time_us =*/ 0,
/*.data =*/ data == NULL ? (void *)(result + 1) : data,
/*.id =*/ -1,
/*.pad =*/ { 0 },
};
ggml_assert_aligned(result->data);
for (int i = 0; i < n_dims; i++) {
result->ne[i] = ne[i];
}
result->nb[0] = GGML_TYPE_SIZE[type];
for (int i = 1; i < GGML_MAX_DIMS; i++) {
result->nb[i] = result->nb[i - 1]*result->ne[i - 1];
}
ctx->n_objects++;
return result;
}
struct ggml_tensor * ggml_new_tensor_mtl_impl(
struct ggml_context * ctx,
enum ggml_type type,
int n_dims,
const int* ne,
void* data) {
// always insert objects at the end of the context's memory pool
struct ggml_object * obj_cur = ctx->objects_end;
const size_t cur_offset = obj_cur == NULL ? 0 : obj_cur->offset;
const size_t cur_size = obj_cur == NULL ? 0 : obj_cur->size;
const size_t cur_end = cur_offset + cur_size;
struct ggml_mtl_object obj_mtl;
{
assert(data == NULL); // TODO: in-place metal buffer, need page aligned memory
size_t size_needed_mtl = 0;
if (data == NULL) {
size_needed_mtl += GGML_TYPE_SIZE[type];
for (int i = 0; i < n_dims; i++) {
size_needed_mtl *= ne[i];
}
}
obj_mtl = ggml_mtl_alloc(size_needed_mtl);
}
size_t size_needed = 0;
size_needed += sizeof(struct ggml_tensor);
if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) {
GGML_PRINT("%s: not enough space in the context's memory pool\n", __func__);
assert(false);
return NULL;
}
char * const mem_buffer = ctx->mem_buffer;
struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end);
*obj_new = (struct ggml_object) {
.offset = cur_end + GGML_OBJECT_SIZE,
.size = size_needed,
.next = NULL,
};
if (obj_cur != NULL) {
obj_cur->next = obj_new;
} else {
// this is the first object in this context
ctx->objects_begin = obj_new;
}
ctx->objects_end = obj_new;
//GGML_PRINT_DEBUG("%s: inserted new object at %zu\n", __func__, cur_end);
struct ggml_tensor * const result = (struct ggml_tensor *)(mem_buffer + obj_new->offset);
ggml_assert_aligned(result);
*result = (struct ggml_tensor) {
/*.type =*/ type,
/*.n_dims =*/ n_dims,
/*.ne =*/ { 1, 1, 1, 1 },
/*.nb =*/ { 0, 0, 0, 0 },
/*.op =*/ GGML_OP_NONE,
/*.is_param =*/ false,
/*.grad =*/ NULL,
/*.src0 =*/ NULL,
/*.src1 =*/ NULL,
/*.opt =*/ { NULL },
/*.n_tasks =*/ 0,
/*.perf_runs =*/ 0,
/*.perf_cycles =*/ 0,
/*.perf_time_us =*/ 0,
/*.data =*/ obj_mtl.data,
/*.id =*/ obj_mtl.id,
/*.pad =*/ { 0 },
};
@ -1489,6 +1591,14 @@ struct ggml_tensor * ggml_new_tensor(
return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL);
}
struct ggml_tensor * ggml_new_tensor_mtl(
struct ggml_context * ctx,
enum ggml_type type,
int n_dims,
const int* ne) {
return ggml_new_tensor_mtl_impl(ctx, type, n_dims, ne, NULL);
}
struct ggml_tensor * ggml_new_tensor_1d(
struct ggml_context * ctx,
enum ggml_type type,
@ -1505,6 +1615,15 @@ struct ggml_tensor * ggml_new_tensor_2d(
return ggml_new_tensor(ctx, type, 2, ne);
}
struct ggml_tensor * ggml_new_tensor_2d_mtl(
struct ggml_context * ctx,
enum ggml_type type,
int ne0,
int ne1) {
const int ne[2] = { ne0, ne1 };
return ggml_new_tensor_mtl(ctx, type, 2, ne);
}
struct ggml_tensor * ggml_new_tensor_3d(
struct ggml_context * ctx,
enum ggml_type type,
@ -4343,8 +4462,11 @@ void ggml_compute_forward_mul_mat_f16_f32(
// nb00 < nb01 - src0 is transposed
// compute by src0 columns
// are we using Metal?
const bool is_mtl = src0->id >= 0;
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
- if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
+ if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst) && !is_mtl) {
GGML_ASSERT(nb10 == sizeof(float));
if (params->ith != 0) return;
@ -4472,6 +4594,20 @@ void ggml_compute_forward_mul_mat_f16_f32(
// parallelize by src0 rows using ggml_vec_dot_f32
if (is_mtl) {
assert(ne02 == 1);
assert(ne03 == 1);
if (params->ith == 0) {
printf("XXXXXXXXXXX src0->ne[0] = %d, src0->ne[1] = %d\n", src0->ne[0], src0->ne[1]);
printf("XXXXXXXXXXX src1->ne[0] = %d, src1->ne[1] = %d\n", src1->ne[0], src1->ne[1]);
struct ggml_mtl_object src0_mtl = { src0->id, src0->data };
ggml_fp16_t * src1_fp16 = params->wdata;
ggml_mtl_mul_mat_f16(NULL, src0_mtl, src1_fp16, dst->data, ne01, ne11, ne00);
}
return;
}
// total rows in src0
const int nr = ne01*ne02*ne03;
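
The dispatch rule is carried entirely by the new id field: tensors created through the _mtl constructors get id >= 0 and a data pointer that aliases a Metal buffer, so the matrix multiplication above can route them to MPS while every other tensor stays on the CPU/BLAS path. A hypothetical end-to-end sketch (ctx and the n_* dimensions are assumed):

// Offload one weight matrix to the GPU; activations stay on the CPU.
struct ggml_tensor * w = ggml_new_tensor_2d_mtl(ctx, GGML_TYPE_F16, n_in, n_out); // w->id >= 0
struct ggml_tensor * x = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_in, n_tokens);  // x->id == -1
struct ggml_tensor * y = ggml_mul_mat(ctx, w, x); // takes the is_mtl branch above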

9
ggml.h

@ -108,7 +108,8 @@ struct ggml_tensor {
int64_t perf_time_us;
void * data;
- char padding[8];
+ int32_t id; // TODO: mtl buffer id
+ char pad[4];
};
// computation graph
@ -173,6 +174,12 @@ struct ggml_tensor * ggml_new_tensor_2d(
int ne0,
int ne1);
struct ggml_tensor * ggml_new_tensor_2d_mtl(
struct ggml_context * ctx,
enum ggml_type type,
int ne0,
int ne1);
struct ggml_tensor * ggml_new_tensor_3d(
struct ggml_context * ctx,
enum ggml_type type,

whisper.cpp

@ -788,10 +788,10 @@ static bool whisper_model_load(const std::string & fname, whisper_context & wctx
layer.mlp_ln_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_audio_state);
layer.mlp_ln_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_audio_state);
- layer.mlp_0_w = ggml_new_tensor_2d(ctx, wtype, n_audio_state, 4*n_audio_state);
+ layer.mlp_0_w = ggml_new_tensor_2d_mtl(ctx, wtype, n_audio_state, 4*n_audio_state); // offload to GPU
layer.mlp_0_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_audio_state);
- layer.mlp_1_w = ggml_new_tensor_2d(ctx, wtype, 4*n_audio_state, n_audio_state);
+ layer.mlp_1_w = ggml_new_tensor_2d_mtl(ctx, wtype, 4*n_audio_state, n_audio_state); // offload to GPU
layer.mlp_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_audio_state);
layer.attn_ln_0_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_audio_state);
@ -1342,7 +1342,7 @@ static bool whisper_encode(
ggml_build_forward_expand(&gf, inpO);
ggml_graph_compute (ctxL, &gf);
- //ggml_graph_print(&gf);
+ ggml_graph_print(&gf);
}
// TODO: this is a hack to have per-layer computation graphs - need to come up with something better
@ -2339,6 +2339,7 @@ struct whisper_full_params whisper_full_default_params(enum whisper_sampling_str
/*.n_threads =*/ std::min(4, (int32_t) std::thread::hardware_concurrency()),
/*.n_max_text_ctx =*/ 16384,
/*.offset_ms =*/ 0,
/*.duration_ms =*/ 0,
/*.translate =*/ false,
/*.no_context =*/ false,
@ -2376,6 +2377,7 @@ struct whisper_full_params whisper_full_default_params(enum whisper_sampling_str
/*.n_threads =*/ std::min(4, (int32_t) std::thread::hardware_concurrency()),
/*.n_max_text_ctx =*/ 16384,
/*.offset_ms =*/ 0,
/*.duration_ms =*/ 0,
/*.translate =*/ false,
/*.no_context =*/ false,
@ -2496,11 +2498,12 @@ int whisper_full(
}
const int seek_start = params.offset_ms/10;
const int seek_end = seek_start + (params.duration_ms == 0 ? whisper_n_len(ctx) : params.duration_ms/10);
// if length of spectrogram is less than 1s (100 samples), then return
// basically don't process anything that is less than 1s
// see issue #39: https://github.com/ggerganov/whisper.cpp/issues/39
- if (whisper_n_len(ctx) < 100 + seek_start) {
+ if (seek_end < 100 + seek_start) {
return 0;
}
@ -2533,7 +2536,7 @@ int whisper_full(
// main loop
int seek = seek_start;
while (true) {
- int progress_cur = (100*seek)/whisper_n_len(ctx);
+ const int progress_cur = (100*(seek - seek_start))/(seek_end - seek_start);
while (progress_cur >= progress_prev + progress_step) {
progress_prev += progress_step;
if (params.print_progress) {
@ -2541,7 +2544,7 @@ int whisper_full(
}
}
- if (seek + 100 >= whisper_n_len(ctx)) {
+ if (seek + 100 >= seek_end) {
break;
}
@ -2622,7 +2625,7 @@ int whisper_full(
// end of text token
if (token.id == whisper_token_eot(ctx)) {
if (result_len == 0) {
- if (seek + seek_delta + 100 >= whisper_n_len(ctx)) {
+ if (seek + seek_delta + 100 >= seek_end) {
result_len = i + 1;
} else {
// TODO: figure out how to resolve this

whisper.h

@ -186,7 +186,8 @@ extern "C" {
int n_threads;
int n_max_text_ctx;
- int offset_ms;
+ int offset_ms; // start offset in ms
+ int duration_ms; // audio duration to process in ms
bool translate;
bool no_context;