forked from extern/whisper.cpp

Compare commits (1 commit: a0da7f71a2)
.gitignore (vendored, 3 changed lines)

@@ -1,7 +1,5 @@
 *.o
 *.a
-*.mlmodel
-*.mlmodelc
 .cache/
 .vs/
 .vscode/
@@ -12,7 +10,6 @@ build-em/
 build-debug/
 build-release/
 build-static/
 build-no-accel/
 build-sanitize-addr/
 build-sanitize-thread/
CMakeLists.txt

@@ -1,6 +1,6 @@
 cmake_minimum_required (VERSION 3.0)
 
-project(whisper.cpp VERSION 1.2.1)
+project(whisper.cpp VERSION 1.2.0)
 
 # Add path to modules
 list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")
@@ -54,8 +54,6 @@ if (APPLE)
     option(WHISPER_NO_AVX  "whisper: disable AVX" OFF)
     option(WHISPER_NO_AVX2 "whisper: disable AVX2" OFF)
     option(WHISPER_NO_FMA  "whisper: disable FMA" OFF)
-
-    option(WHISPER_COREML  "whisper: enable Core ML framework" OFF)
 else()
     option(WHISPER_SUPPORT_OPENBLAS "whisper: support for OpenBLAS" OFF)
 endif()
@@ -88,33 +86,16 @@ endif()
 
 find_package(Threads REQUIRED)
 
-# on APPLE
-if (APPLE)
-    # include Accelerate framework
-    if (NOT WHISPER_NO_ACCELERATE)
-        find_library(ACCELERATE_FRAMEWORK Accelerate)
-
-        if (ACCELERATE_FRAMEWORK)
-            message(STATUS "Accelerate framework found")
-
-            set(WHISPER_EXTRA_LIBS  ${WHISPER_EXTRA_LIBS}  ${ACCELERATE_FRAMEWORK})
-            set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_USE_ACCELERATE)
-        else()
-            message(WARNING "Accelerate framework not found")
-        endif()
-    endif()
-
-    if (WHISPER_COREML)
-        find_library(FOUNDATION_FRAMEWORK Foundation)
-        find_library(COREML_FRAMEWORK CoreML)
-
-        if (COREML_FRAMEWORK)
-            message(STATUS "CoreML framework found")
-
-            set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DWHISPER_USE_COREML)
-        else()
-            message(WARNING "CoreML framework not found")
-        endif()
-    endif()
-endif()
+# on APPLE - include Accelerate framework
+if (APPLE AND NOT WHISPER_NO_ACCELERATE)
+    find_library(ACCELERATE_FRAMEWORK Accelerate)
+    if (ACCELERATE_FRAMEWORK)
+        message(STATUS "Accelerate framework found")
+
+        set(WHISPER_EXTRA_LIBS  ${WHISPER_EXTRA_LIBS}  ${ACCELERATE_FRAMEWORK})
+        set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_USE_ACCELERATE)
+    else()
+        message(WARNING "Accelerate framework not found")
+    endif()
+endif()
@@ -200,33 +181,6 @@ if (WHISPER_PERF)
     set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -DGGML_PERF)
 endif()
 
-#
-# whisper.coreml - Core ML support
-#
-
-if (WHISPER_COREML)
-    set(TARGET whisper.coreml)
-
-    add_library(${TARGET}
-        coreml/whisper-encoder.h
-        coreml/whisper-encoder.mm
-        coreml/whisper-encoder-impl.h
-        coreml/whisper-encoder-impl.m
-        )
-
-    include(DefaultTargetOptions)
-
-    target_include_directories(${TARGET} PUBLIC
-        .
-        )
-
-    target_link_libraries(${TARGET} PRIVATE ${FOUNDATION_FRAMEWORK} ${COREML_FRAMEWORK})
-
-    set_target_properties(${TARGET} PROPERTIES
-        COMPILE_FLAGS "-fobjc-arc"
-        )
-endif()
-
 #
 # whisper - this is the main library of the project
 #
@@ -246,10 +200,6 @@ target_include_directories(${TARGET} PUBLIC
     .
     )
 
-if (WHISPER_COREML)
-    target_link_libraries(${TARGET} PRIVATE whisper.coreml)
-endif()
-
 if (MSVC)
     target_link_libraries(${TARGET} PRIVATE ${WHISPER_EXTRA_LIBS} ${CMAKE_THREAD_LIBS_INIT})
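With the WHISPER_COREML option and the whisper.coreml target removed, a configure step such as `cmake -DWHISPER_COREML=1 ..` (the flag the deleted option used to expose) no longer does anything in this tree; Core ML can no longer be enabled from CMake. The sample invocation is illustrative, inferred from the deleted option, not stated in the commit.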
Makefile (50 changed lines)

@@ -30,8 +30,8 @@ endif
 # Compile flags
 #
 
-CFLAGS   = -I.              -O3 -DNDEBUG -std=c11   -fPIC
-CXXFLAGS = -I. -I./examples -O3 -DNDEBUG -std=c++11 -fPIC
+CFLAGS   = -I.              -O3 -std=c11   -fPIC
+CXXFLAGS = -I. -I./examples -O3 -std=c++11 -fPIC
 LDFLAGS  =
 
 # OS specific
@@ -132,10 +132,6 @@ ifndef WHISPER_NO_ACCELERATE
 	LDFLAGS += -framework Accelerate
 endif
 endif
-ifdef WHISPER_COREML
-	CXXFLAGS += -DWHISPER_USE_COREML
-	LDFLAGS += -framework Foundation -framework CoreML
-endif
ifdef WHISPER_OPENBLAS
 	CFLAGS += -DGGML_USE_OPENBLAS -I/usr/local/include/openblas
 	LDFLAGS += -lopenblas
@@ -145,8 +141,6 @@ ifdef WHISPER_GPROF
 	CXXFLAGS += -pg
 endif
 ifneq ($(filter aarch64%,$(UNAME_M)),)
-	CFLAGS += -mcpu=native
-	CXXFLAGS += -mcpu=native
 endif
 ifneq ($(filter armv6%,$(UNAME_M)),)
 	# Raspberry Pi 1, 2, 3
@@ -188,23 +182,11 @@ ggml.o: ggml.c ggml.h
 whisper.o: whisper.cpp whisper.h
 	$(CXX) $(CXXFLAGS) -c whisper.cpp -o whisper.o
 
-ifndef WHISPER_COREML
-WHISPER_OBJ = whisper.o
-else
-whisper-encoder.o: coreml/whisper-encoder.mm coreml/whisper-encoder.h
-	$(CXX) -O3 -I . -c coreml/whisper-encoder.mm -o whisper-encoder.o
-
-whisper-encoder-impl.o: coreml/whisper-encoder-impl.m coreml/whisper-encoder-impl.h
-	$(CXX) -O3 -I . -fobjc-arc -c coreml/whisper-encoder-impl.m -o whisper-encoder-impl.o
-
-WHISPER_OBJ = whisper.o whisper-encoder.o whisper-encoder-impl.o
-endif
-
-libwhisper.a: ggml.o $(WHISPER_OBJ)
-	$(AR) rcs libwhisper.a ggml.o $(WHISPER_OBJ)
+libwhisper.a: ggml.o whisper.o
+	$(AR) rcs libwhisper.a ggml.o whisper.o
 
-libwhisper.so: ggml.o $(WHISPER_OBJ)
-	$(CXX) $(CXXFLAGS) -shared -o libwhisper.so ggml.o $(WHISPER_OBJ) $(LDFLAGS)
+libwhisper.so: ggml.o whisper.o
+	$(CXX) $(CXXFLAGS) -shared -o libwhisper.so ggml.o whisper.o $(LDFLAGS)
 
 clean:
 	rm -f *.o main stream command talk bench libwhisper.a libwhisper.so
@@ -218,21 +200,21 @@ CC_SDL=`sdl2-config --cflags --libs`
 SRC_COMMON = examples/common.cpp
 SRC_COMMON_SDL = examples/common-sdl.cpp
 
-main: examples/main/main.cpp $(SRC_COMMON) ggml.o $(WHISPER_OBJ)
-	$(CXX) $(CXXFLAGS) examples/main/main.cpp $(SRC_COMMON) ggml.o $(WHISPER_OBJ) -o main $(LDFLAGS)
+main: examples/main/main.cpp $(SRC_COMMON) ggml.o whisper.o
+	$(CXX) $(CXXFLAGS) examples/main/main.cpp $(SRC_COMMON) ggml.o whisper.o -o main $(LDFLAGS)
 	./main -h
 
-stream: examples/stream/stream.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o $(WHISPER_OBJ)
-	$(CXX) $(CXXFLAGS) examples/stream/stream.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o $(WHISPER_OBJ) -o stream $(CC_SDL) $(LDFLAGS)
+stream: examples/stream/stream.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o whisper.o
+	$(CXX) $(CXXFLAGS) examples/stream/stream.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o whisper.o -o stream $(CC_SDL) $(LDFLAGS)
 
-command: examples/command/command.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o $(WHISPER_OBJ)
-	$(CXX) $(CXXFLAGS) examples/command/command.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o $(WHISPER_OBJ) -o command $(CC_SDL) $(LDFLAGS)
+command: examples/command/command.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o whisper.o
+	$(CXX) $(CXXFLAGS) examples/command/command.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o whisper.o -o command $(CC_SDL) $(LDFLAGS)
 
-talk: examples/talk/talk.cpp examples/talk/gpt-2.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o $(WHISPER_OBJ)
-	$(CXX) $(CXXFLAGS) examples/talk/talk.cpp examples/talk/gpt-2.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o $(WHISPER_OBJ) -o talk $(CC_SDL) $(LDFLAGS)
+talk: examples/talk/talk.cpp examples/talk/gpt-2.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o whisper.o
+	$(CXX) $(CXXFLAGS) examples/talk/talk.cpp examples/talk/gpt-2.cpp $(SRC_COMMON) $(SRC_COMMON_SDL) ggml.o whisper.o -o talk $(CC_SDL) $(LDFLAGS)
 
-bench: examples/bench/bench.cpp ggml.o $(WHISPER_OBJ)
-	$(CXX) $(CXXFLAGS) examples/bench/bench.cpp ggml.o $(WHISPER_OBJ) -o bench $(LDFLAGS)
+bench: examples/bench/bench.cpp ggml.o whisper.o
+	$(CXX) $(CXXFLAGS) examples/bench/bench.cpp ggml.o whisper.o -o bench $(LDFLAGS)
 
 #
 # Audio samples
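The deleted `ifdef WHISPER_COREML` block above is what let a build like `make WHISPER_COREML=1 main` compile the two extra Objective-C encoder objects into WHISPER_OBJ; after this commit every target links plain whisper.o again. (The command line is illustrative, inferred from the deleted guard, not stated in the commit.)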
README.md

@@ -4,7 +4,7 @@
 [](https://opensource.org/licenses/MIT)
 [](https://www.npmjs.com/package/whisper.cpp/)
 
-Stable: [v1.2.1](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.2.1) / [Roadmap | F.A.Q.](https://github.com/ggerganov/whisper.cpp/discussions/126)
+Stable: [v1.2.0](https://github.com/ggerganov/whisper.cpp/releases/tag/v1.2.0) / [Roadmap | F.A.Q.](https://github.com/ggerganov/whisper.cpp/discussions/126)
 
 High-performance inference of [OpenAI's Whisper](https://github.com/openai/whisper) automatic speech recognition (ASR) model:
 
@@ -469,9 +469,7 @@ in [models](models).
 - [X] .NET: | [#422](https://github.com/ggerganov/whisper.cpp/discussions/422)
   - [sandrohanea/whisper.net](https://github.com/sandrohanea/whisper.net)
   - [NickDarvey/whisper](https://github.com/NickDarvey/whisper)
-- [X] Python: | [#9](https://github.com/ggerganov/whisper.cpp/issues/9)
-  - [stlukey/whispercpp.py](https://github.com/stlukey/whispercpp.py) (Cython)
-  - [aarnphm/whispercpp](https://github.com/aarnphm/whispercpp) (Pybind11)
+- [ ] Python: soon | [WIP](https://github.com/ggerganov/whisper.cpp/issues/9)
 
 ## Examples
Submodule bindings/ios updated: 92d4c5c9a0...d5c6d5c8a3
package.json

@@ -1,6 +1,6 @@
 {
   "name": "whisper.cpp",
-  "version": "1.2.1",
+  "version": "1.2.0",
   "description": "Whisper speech recognition",
   "main": "whisper.js",
   "scripts": {
coreml/whisper-encoder-impl.h (deleted file)

@@ -1,142 +0,0 @@
//
// CoremlEncoder.h
//
// This file was automatically generated and should not be edited.
//

#import <Foundation/Foundation.h>
#import <CoreML/CoreML.h>
#include <stdint.h>
#include <os/log.h>

NS_ASSUME_NONNULL_BEGIN


/// Model Prediction Input Type
API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) __attribute__((visibility("hidden")))
@interface CoremlEncoderInput : NSObject<MLFeatureProvider>

/// melSegment as 1 × 80 × 3000 3-dimensional array of floats
@property (readwrite, nonatomic, strong) MLMultiArray * melSegment;
- (instancetype)init NS_UNAVAILABLE;
- (instancetype)initWithMelSegment:(MLMultiArray *)melSegment NS_DESIGNATED_INITIALIZER;

@end


/// Model Prediction Output Type
API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) __attribute__((visibility("hidden")))
@interface CoremlEncoderOutput : NSObject<MLFeatureProvider>

/// output as multidimensional array of floats
@property (readwrite, nonatomic, strong) MLMultiArray * output;
- (instancetype)init NS_UNAVAILABLE;
- (instancetype)initWithOutput:(MLMultiArray *)output NS_DESIGNATED_INITIALIZER;

@end


/// Class for model loading and prediction
API_AVAILABLE(macos(10.15), ios(13.0), watchos(6.0), tvos(13.0)) __attribute__((visibility("hidden")))
@interface CoremlEncoder : NSObject
@property (readonly, nonatomic, nullable) MLModel * model;

/**
    URL of the underlying .mlmodelc directory.
*/
+ (nullable NSURL *)URLOfModelInThisBundle;

/**
    Initialize CoremlEncoder instance from an existing MLModel object.

    Usually the application does not use this initializer unless it makes a subclass of CoremlEncoder.
    Such application may want to use `-[MLModel initWithContentsOfURL:configuration:error:]` and `+URLOfModelInThisBundle` to create a MLModel object to pass-in.
*/
- (instancetype)initWithMLModel:(MLModel *)model NS_DESIGNATED_INITIALIZER;

/**
    Initialize CoremlEncoder instance with the model in this bundle.
*/
- (nullable instancetype)init;

/**
    Initialize CoremlEncoder instance with the model in this bundle.

    @param configuration The model configuration object
    @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
*/
- (nullable instancetype)initWithConfiguration:(MLModelConfiguration *)configuration error:(NSError * _Nullable __autoreleasing * _Nullable)error;

/**
    Initialize CoremlEncoder instance from the model URL.

    @param modelURL URL to the .mlmodelc directory for CoremlEncoder.
    @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
*/
- (nullable instancetype)initWithContentsOfURL:(NSURL *)modelURL error:(NSError * _Nullable __autoreleasing * _Nullable)error;

/**
    Initialize CoremlEncoder instance from the model URL.

    @param modelURL URL to the .mlmodelc directory for CoremlEncoder.
    @param configuration The model configuration object
    @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
*/
- (nullable instancetype)initWithContentsOfURL:(NSURL *)modelURL configuration:(MLModelConfiguration *)configuration error:(NSError * _Nullable __autoreleasing * _Nullable)error;

/**
    Construct CoremlEncoder instance asynchronously with configuration.
    Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.

    @param configuration The model configuration
    @param handler When the model load completes successfully or unsuccessfully, the completion handler is invoked with a valid CoremlEncoder instance or NSError object.
*/
+ (void)loadWithConfiguration:(MLModelConfiguration *)configuration completionHandler:(void (^)(CoremlEncoder * _Nullable model, NSError * _Nullable error))handler API_AVAILABLE(macos(11.0), ios(14.0), watchos(7.0), tvos(14.0)) __attribute__((visibility("hidden")));

/**
    Construct CoremlEncoder instance asynchronously with URL of .mlmodelc directory and optional configuration.

    Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.

    @param modelURL The model URL.
    @param configuration The model configuration
    @param handler When the model load completes successfully or unsuccessfully, the completion handler is invoked with a valid CoremlEncoder instance or NSError object.
*/
+ (void)loadContentsOfURL:(NSURL *)modelURL configuration:(MLModelConfiguration *)configuration completionHandler:(void (^)(CoremlEncoder * _Nullable model, NSError * _Nullable error))handler API_AVAILABLE(macos(11.0), ios(14.0), watchos(7.0), tvos(14.0)) __attribute__((visibility("hidden")));

/**
    Make a prediction using the standard interface
    @param input an instance of CoremlEncoderInput to predict from
    @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
    @return the prediction as CoremlEncoderOutput
*/
- (nullable CoremlEncoderOutput *)predictionFromFeatures:(CoremlEncoderInput *)input error:(NSError * _Nullable __autoreleasing * _Nullable)error;

/**
    Make a prediction using the standard interface
    @param input an instance of CoremlEncoderInput to predict from
    @param options prediction options
    @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
    @return the prediction as CoremlEncoderOutput
*/
- (nullable CoremlEncoderOutput *)predictionFromFeatures:(CoremlEncoderInput *)input options:(MLPredictionOptions *)options error:(NSError * _Nullable __autoreleasing * _Nullable)error;

/**
    Make a prediction using the convenience interface
    @param melSegment as 1 × 80 × 3000 3-dimensional array of floats:
    @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
    @return the prediction as CoremlEncoderOutput
*/
- (nullable CoremlEncoderOutput *)predictionFromMelSegment:(MLMultiArray *)melSegment error:(NSError * _Nullable __autoreleasing * _Nullable)error;

/**
    Batch prediction
    @param inputArray array of CoremlEncoderInput instances to obtain predictions from
    @param options prediction options
    @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
    @return the predictions as NSArray<CoremlEncoderOutput *>
*/
- (nullable NSArray<CoremlEncoderOutput *> *)predictionsFromInputs:(NSArray<CoremlEncoderInput*> *)inputArray options:(MLPredictionOptions *)options error:(NSError * _Nullable __autoreleasing * _Nullable)error;
@end

NS_ASSUME_NONNULL_END
coreml/whisper-encoder-impl.m (deleted file)

@@ -1,197 +0,0 @@
//
// CoremlEncoder.m
//
// This file was automatically generated and should not be edited.
//

#if !__has_feature(objc_arc)
#error This file must be compiled with automatic reference counting enabled (-fobjc-arc)
#endif

#import "whisper-encoder-impl.h"

@implementation CoremlEncoderInput

- (instancetype)initWithMelSegment:(MLMultiArray *)melSegment {
    self = [super init];
    if (self) {
        _melSegment = melSegment;
    }
    return self;
}

- (NSSet<NSString *> *)featureNames {
    return [NSSet setWithArray:@[@"melSegment"]];
}

- (nullable MLFeatureValue *)featureValueForName:(NSString *)featureName {
    if ([featureName isEqualToString:@"melSegment"]) {
        return [MLFeatureValue featureValueWithMultiArray:self.melSegment];
    }
    return nil;
}

@end

@implementation CoremlEncoderOutput

- (instancetype)initWithOutput:(MLMultiArray *)output {
    self = [super init];
    if (self) {
        _output = output;
    }
    return self;
}

- (NSSet<NSString *> *)featureNames {
    return [NSSet setWithArray:@[@"output"]];
}

- (nullable MLFeatureValue *)featureValueForName:(NSString *)featureName {
    if ([featureName isEqualToString:@"output"]) {
        return [MLFeatureValue featureValueWithMultiArray:self.output];
    }
    return nil;
}

@end

@implementation CoremlEncoder


/**
    URL of the underlying .mlmodelc directory.
*/
+ (nullable NSURL *)URLOfModelInThisBundle {
    NSString *assetPath = [[NSBundle bundleForClass:[self class]] pathForResource:@"CoremlEncoder" ofType:@"mlmodelc"];
    if (nil == assetPath) { os_log_error(OS_LOG_DEFAULT, "Could not load CoremlEncoder.mlmodelc in the bundle resource"); return nil; }
    return [NSURL fileURLWithPath:assetPath];
}


/**
    Initialize CoremlEncoder instance from an existing MLModel object.

    Usually the application does not use this initializer unless it makes a subclass of CoremlEncoder.
    Such application may want to use `-[MLModel initWithContentsOfURL:configuration:error:]` and `+URLOfModelInThisBundle` to create a MLModel object to pass-in.
*/
- (instancetype)initWithMLModel:(MLModel *)model {
    self = [super init];
    if (!self) { return nil; }
    _model = model;
    if (_model == nil) { return nil; }
    return self;
}


/**
    Initialize CoremlEncoder instance with the model in this bundle.
*/
- (nullable instancetype)init {
    return [self initWithContentsOfURL:(NSURL * _Nonnull)self.class.URLOfModelInThisBundle error:nil];
}


/**
    Initialize CoremlEncoder instance with the model in this bundle.

    @param configuration The model configuration object
    @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
*/
- (nullable instancetype)initWithConfiguration:(MLModelConfiguration *)configuration error:(NSError * _Nullable __autoreleasing * _Nullable)error {
    return [self initWithContentsOfURL:(NSURL * _Nonnull)self.class.URLOfModelInThisBundle configuration:configuration error:error];
}


/**
    Initialize CoremlEncoder instance from the model URL.

    @param modelURL URL to the .mlmodelc directory for CoremlEncoder.
    @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
*/
- (nullable instancetype)initWithContentsOfURL:(NSURL *)modelURL error:(NSError * _Nullable __autoreleasing * _Nullable)error {
    MLModel *model = [MLModel modelWithContentsOfURL:modelURL error:error];
    if (model == nil) { return nil; }
    return [self initWithMLModel:model];
}


/**
    Initialize CoremlEncoder instance from the model URL.

    @param modelURL URL to the .mlmodelc directory for CoremlEncoder.
    @param configuration The model configuration object
    @param error If an error occurs, upon return contains an NSError object that describes the problem. If you are not interested in possible errors, pass in NULL.
*/
- (nullable instancetype)initWithContentsOfURL:(NSURL *)modelURL configuration:(MLModelConfiguration *)configuration error:(NSError * _Nullable __autoreleasing * _Nullable)error {
    MLModel *model = [MLModel modelWithContentsOfURL:modelURL configuration:configuration error:error];
    if (model == nil) { return nil; }
    return [self initWithMLModel:model];
}


/**
    Construct CoremlEncoder instance asynchronously with configuration.
    Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.

    @param configuration The model configuration
    @param handler When the model load completes successfully or unsuccessfully, the completion handler is invoked with a valid CoremlEncoder instance or NSError object.
*/
+ (void)loadWithConfiguration:(MLModelConfiguration *)configuration completionHandler:(void (^)(CoremlEncoder * _Nullable model, NSError * _Nullable error))handler {
    [self loadContentsOfURL:(NSURL * _Nonnull)[self URLOfModelInThisBundle]
              configuration:configuration
          completionHandler:handler];
}


/**
    Construct CoremlEncoder instance asynchronously with URL of .mlmodelc directory and optional configuration.

    Model loading may take time when the model content is not immediately available (e.g. encrypted model). Use this factory method especially when the caller is on the main thread.

    @param modelURL The model URL.
    @param configuration The model configuration
    @param handler When the model load completes successfully or unsuccessfully, the completion handler is invoked with a valid CoremlEncoder instance or NSError object.
*/
+ (void)loadContentsOfURL:(NSURL *)modelURL configuration:(MLModelConfiguration *)configuration completionHandler:(void (^)(CoremlEncoder * _Nullable model, NSError * _Nullable error))handler {
    [MLModel loadContentsOfURL:modelURL
                 configuration:configuration
             completionHandler:^(MLModel *model, NSError *error) {
        if (model != nil) {
            CoremlEncoder *typedModel = [[CoremlEncoder alloc] initWithMLModel:model];
            handler(typedModel, nil);
        } else {
            handler(nil, error);
        }
    }];
}

- (nullable CoremlEncoderOutput *)predictionFromFeatures:(CoremlEncoderInput *)input error:(NSError * _Nullable __autoreleasing * _Nullable)error {
    return [self predictionFromFeatures:input options:[[MLPredictionOptions alloc] init] error:error];
}

- (nullable CoremlEncoderOutput *)predictionFromFeatures:(CoremlEncoderInput *)input options:(MLPredictionOptions *)options error:(NSError * _Nullable __autoreleasing * _Nullable)error {
    id<MLFeatureProvider> outFeatures = [self.model predictionFromFeatures:input options:options error:error];
    if (!outFeatures) { return nil; }
    return [[CoremlEncoderOutput alloc] initWithOutput:(MLMultiArray *)[outFeatures featureValueForName:@"output"].multiArrayValue];
}

- (nullable CoremlEncoderOutput *)predictionFromMelSegment:(MLMultiArray *)melSegment error:(NSError * _Nullable __autoreleasing * _Nullable)error {
    CoremlEncoderInput *input_ = [[CoremlEncoderInput alloc] initWithMelSegment:melSegment];
    return [self predictionFromFeatures:input_ error:error];
}

- (nullable NSArray<CoremlEncoderOutput *> *)predictionsFromInputs:(NSArray<CoremlEncoderInput*> *)inputArray options:(MLPredictionOptions *)options error:(NSError * _Nullable __autoreleasing * _Nullable)error {
    id<MLBatchProvider> inBatch = [[MLArrayBatchProvider alloc] initWithFeatureProviderArray:inputArray];
    id<MLBatchProvider> outBatch = [self.model predictionsFromBatch:inBatch options:options error:error];
    if (!outBatch) { return nil; }
    NSMutableArray<CoremlEncoderOutput*> *results = [NSMutableArray arrayWithCapacity:(NSUInteger)outBatch.count];
    for (NSInteger i = 0; i < outBatch.count; i++) {
        id<MLFeatureProvider> resultProvider = [outBatch featuresAtIndex:i];
        CoremlEncoderOutput * result = [[CoremlEncoderOutput alloc] initWithOutput:(MLMultiArray *)[resultProvider featureValueForName:@"output"].multiArrayValue];
        [results addObject:result];
    }
    return results;
}

@end
coreml/whisper-encoder.h (deleted file)

@@ -1,22 +0,0 @@
// Wrapper of the Core ML Whisper Encoder model
//
// Code is derived from the work of Github user @wangchou
// ref: https://github.com/wangchou/callCoreMLFromCpp

#if __cplusplus
extern "C" {
#endif

struct whisper_coreml_context;

struct whisper_coreml_context * whisper_coreml_init(const char * path_model);
void whisper_coreml_free(struct whisper_coreml_context * ctx);

void whisper_coreml_encode(
        const whisper_coreml_context * ctx,
        float * mel,
        float * out);

#if __cplusplus
}
#endif
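Since the deleted header above is the whole public surface of the wrapper, a minimal stand-alone sketch of how it is driven may help. This is an illustration, not code from the commit: the model path is an example, and the buffer sizes are assumptions (the mel segment is fixed at 1 × 80 × 3000 floats by the generated interface, while the output size depends on the model, e.g. 512 × 1500 for "base").

#include <cstdio>
#include <vector>

#include "coreml/whisper-encoder.h"

int main() {
    // hypothetical path to a compiled Core ML model directory
    struct whisper_coreml_context * ctx = whisper_coreml_init("models/ggml-base.en.mlmodelc");
    if (!ctx) {
        fprintf(stderr, "failed to load the Core ML encoder\n");
        return 1;
    }

    std::vector<float> mel(80*3000, 0.0f); // one 30-second mel segment (1 x 80 x 3000)
    std::vector<float> out(512*1500);      // n_state x n_ctx; 512 x 1500 assumed for "base"

    whisper_coreml_encode(ctx, mel.data(), out.data());

    whisper_coreml_free(ctx);
    return 0;
}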
coreml/whisper-encoder.mm (deleted file)

@@ -1,61 +0,0 @@
#import "coreml/whisper-encoder.h"
#import "coreml/whisper-encoder-impl.h"

#import <CoreML/CoreML.h>

#include <stdlib.h>

#if __cplusplus
extern "C" {
#endif

struct whisper_coreml_context {
    const void * data;
};

struct whisper_coreml_context * whisper_coreml_init(const char * path_model) {
    NSString * path_model_str = [[NSString alloc] initWithUTF8String:path_model];

    NSURL * url_model = [NSURL fileURLWithPath: path_model_str];

    const void * data = CFBridgingRetain([[CoremlEncoder alloc] initWithContentsOfURL:url_model error:nil]);

    if (data == NULL) {
        return NULL;
    }

    whisper_coreml_context * ctx = new whisper_coreml_context;

    ctx->data = data;

    return ctx;
}

void whisper_coreml_free(struct whisper_coreml_context * ctx) {
    CFRelease(ctx->data);
    delete ctx;
}

void whisper_coreml_encode(
        const whisper_coreml_context * ctx,
        float * mel,
        float * out) {
    MLMultiArray * inMultiArray = [
        [MLMultiArray alloc] initWithDataPointer: mel
                                           shape: @[@1, @80, @3000]
                                        dataType: MLMultiArrayDataTypeFloat32
                                         strides: @[@(240000), @(3000), @1]
                                     deallocator: nil
                                           error: nil
    ];

    CoremlEncoderOutput * outCoreML = [(__bridge id) ctx->data predictionFromMelSegment:inMultiArray error:nil];

    MLMultiArray * outMA = outCoreML.output;

    memcpy(out, outMA.dataPointer, outMA.count * sizeof(float));
}

#if __cplusplus
}
#endif
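Worth noting in the deleted whisper_coreml_encode: the strides @[@(240000), @(3000), @1] are exactly the contiguous layout of the declared 1 × 80 × 3000 shape (80 × 3000 = 240000 floats per outermost step), so the mel buffer is wrapped in place without a copy, and the result is copied back with memcpy as outMA.count floats.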
(file header lost in this capture; the run(...) signature suggests examples/addon.node/addon.cpp)

@@ -72,7 +72,7 @@ int timestamp_to_sample(int64_t t, int n_samples) {
     return std::max(0, std::min((int) n_samples - 1, (int) ((t*WHISPER_SAMPLE_RATE)/100)));
 }
 
-void whisper_print_segment_callback(struct whisper_context * ctx, int n_new, void * user_data) {
+void whisper_print_segment(struct whisper_context * ctx, int n_new, void * user_data) {
     const auto & params  = *((whisper_print_user_data *) user_data)->params;
     const auto & pcmf32s = *((whisper_print_user_data *) user_data)->pcmf32s;
 
@@ -250,7 +250,7 @@ int run(whisper_params &params, std::vector<std::vector<std::string>> &result) {
 
     // this callback is called on each new segment
     if (!wparams.print_realtime) {
-        wparams.new_segment_callback           = whisper_print_segment_callback;
+        wparams.new_segment_callback           = whisper_print_segment;
         wparams.new_segment_callback_user_data = &user_data;
     }
 
examples/command/command.cpp

@@ -109,6 +109,73 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & params) {
     fprintf(stderr, "\n");
 }
 
+struct whisper_logits_filter_user_data {
+    std::vector<std::string> * allowed_commands;
+    std::vector<std::vector<whisper_token>> * allowed_tokens;
+};
+
+void whisper_logits_filter(
+        struct whisper_context * ctx,
+        const whisper_token_data * tokens,
+        int n_tokens,
+        float * logits,
+        void * user_data) {
+    const auto & allowed_tokens = *((whisper_logits_filter_user_data *) user_data)->allowed_tokens;
+
+    printf("n_tokens = %d\n", n_tokens);
+    for (int i = 0; i < n_tokens; i++) {
+        printf(" - '%s' (%.2f)\n", whisper_token_to_str(ctx, tokens[i].id), logits[i]);
+    }
+
+    if (n_tokens == 0) {
+        return;
+    }
+
+    std::vector<std::pair<whisper_token, float>> pool;
+    for (int i = 0; i < (int) allowed_tokens.size(); i++) {
+        const int n = (int) allowed_tokens[i].size();
+        if (n_tokens > n) {
+            continue;
+        }
+
+        const whisper_token id = allowed_tokens[i][n_tokens - 1];
+        pool.push_back({ id, logits[id] });
+    }
+
+    if (pool.empty()) {
+        return;
+    }
+
+    printf("applying logits filter, pool size = %d\n", (int) pool.size());
+
+    const int ibeg = whisper_token_beg(ctx);
+
+    double sum_all = 0.0;
+    for (int i = 0; i < ibeg; ++i) {
+        if (logits[i] == -INFINITY) {
+            continue;
+        }
+        sum_all += logits[i];
+    }
+
+    double sum_pool = 0.0;
+    for (int i = 0; i < (int) pool.size(); ++i) {
+        sum_pool += pool[i].second;
+    }
+
+    printf("sum_all = %.2f, sum_pool = %.2f\n", sum_all, sum_pool);
+
+    for (int i = 0; i < ibeg; ++i) {
+        logits[i] = -INFINITY;
+    }
+
+    for (int i = 0; i < (int) pool.size(); ++i) {
+        //logits[pool[i].first] = pool[i].second / sum_pool * sum_all;
+        logits[pool[i].first] = pool[i].second;
+        printf(" - '%s' (%.2f)\n", whisper_token_to_str(ctx, pool[i].first), logits[pool[i].first]);
+    }
+}
+
 std::string transcribe(whisper_context * ctx, const whisper_params & params, const std::vector<float> & pcmf32, float & prob, int64_t & t_ms) {
     const auto t_start = std::chrono::high_resolution_clock::now();
 
@@ -131,6 +198,8 @@ std::string transcribe(whisper_context * ctx, const whisper_params & params, con
     wparams.audio_ctx = params.audio_ctx;
     wparams.speed_up  = params.speed_up;
 
+    wparams.temperature_inc = -1.0f;
+
     if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
         return "";
     }
@@ -334,22 +403,31 @@ int process_command_list(struct whisper_context * ctx, audio_async &audio, const
     wparams.translate        = params.translate;
     wparams.no_context       = true;
     wparams.single_segment   = true;
-    wparams.max_tokens       = 1;
+    //wparams.max_tokens       = 1;
     wparams.language         = params.language.c_str();
     wparams.n_threads        = params.n_threads;
 
     wparams.audio_ctx = params.audio_ctx;
     wparams.speed_up  = params.speed_up;
 
+    wparams.temperature_inc = -1.0f;
+
     wparams.prompt_tokens    = k_tokens.data();
     wparams.prompt_n_tokens  = k_tokens.size();
 
+    whisper_logits_filter_user_data user_data = { &allowed_commands, &allowed_tokens };
+
+    wparams.logits_filter_callback           = whisper_logits_filter;
+    wparams.logits_filter_callback_user_data = &user_data;
+
     // run the transformer and a single decoding pass
     if (whisper_full(ctx, wparams, pcmf32_cur.data(), pcmf32_cur.size()) != 0) {
         fprintf(stderr, "%s: ERROR: whisper_full() failed\n", __func__);
         break;
     }
 
+    fprintf(stdout, "%s: text - '%s'\n", __func__, whisper_full_get_segment_text(ctx, 0));
+
     // estimate command probability
     // NOTE: not optimal
     {
@@ -436,7 +514,7 @@ int process_command_list(struct whisper_context * ctx, audio_async &audio, const
 
 // always-prompt mode
 // transcribe the voice into text after valid prompt
-int always_prompt_transcription(struct whisper_context * ctx, audio_async & audio, const whisper_params & params) {
+int process_always_prompt(struct whisper_context * ctx, audio_async & audio, const whisper_params & params) {
     bool is_running  = true;
     bool ask_prompt  = true;
 
@@ -496,7 +574,7 @@ int always_prompt_transcription(struct whisper_context * ctx, audio_async & audi
     const float sim = similarity(prompt, k_prompt);
 
     //debug
-    //fprintf(stdout, "command size: %i\n", command_length);
+    //fprintf(stdout, "command size: %d, sim: %f\n", (int) command.size(), sim);
 
     if ((sim > 0.7f) && (command.size() > 0)) {
         fprintf(stdout, "%s: Command '%s%s%s', (t = %d ms)\n", __func__, "\033[1m", command.c_str(), "\033[0m", (int) t_ms);
@@ -676,7 +754,7 @@ int main(int argc, char ** argv) {
     if (!params.commands.empty()) {
         ret_val = process_command_list(ctx, audio, params);
     } else if (!params.prompt.empty()) {
-        ret_val = always_prompt_transcription(ctx, audio, params);
+        ret_val = process_always_prompt(ctx, audio, params);
     } else {
         ret_val = process_general_transcription(ctx, audio, params);
     }
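The net effect of the whisper_logits_filter callback added above can be summarized in a few lines. This is a paraphrase for illustration, not code from the commit: "token" stands in for whisper_token, and n_text_tokens stands in for whisper_token_beg(ctx), the id separating ordinary text tokens from special ones.

#include <cmath>
#include <cstdio>
#include <utility>
#include <vector>

using token = int;

void mask_to_pool(std::vector<float> & logits,
                  const std::vector<std::pair<token, float>> & pool,
                  int n_text_tokens) {
    // suppress every ordinary text token ...
    for (int i = 0; i < n_text_tokens; ++i) {
        logits[i] = -INFINITY;
    }
    // ... then restore only the allowed candidates with their original scores
    for (const auto & p : pool) {
        logits[p.first] = p.second;
    }
}

int main() {
    std::vector<float> logits = { 0.1f, 2.0f, 0.3f, 1.5f };
    mask_to_pool(logits, { { 1, 2.0f } }, (int) logits.size());
    for (size_t i = 0; i < logits.size(); ++i) {
        printf("token %zu: %.2f\n", i, logits[i]); // only token 1 survives
    }
    return 0;
}

This is why the commit also comments out wparams.max_tokens = 1 and disables temperature fallback: the decoder is forced to walk only token sequences that spell one of the allowed commands.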
examples/common-sdl.h

@@ -1,13 +1,13 @@
 #pragma once
 
-#include <SDL.h>
-#include <SDL_audio.h>
-
 #include <atomic>
 #include <cstdint>
 #include <vector>
 #include <mutex>
 
+#include <SDL.h>
+#include <SDL_audio.h>
+
 //
 // SDL Audio capture
 //
examples/main/main.cpp

@@ -193,7 +193,7 @@ struct whisper_print_user_data {
     const std::vector<std::vector<float>> * pcmf32s;
 };
 
-void whisper_print_segment_callback(struct whisper_context * ctx, int n_new, void * user_data) {
+void whisper_print_segment(struct whisper_context * ctx, int n_new, void * user_data) {
     const auto & params  = *((whisper_print_user_data *) user_data)->params;
     const auto & pcmf32s = *((whisper_print_user_data *) user_data)->pcmf32s;
 
@@ -352,14 +352,13 @@ bool output_csv(struct whisper_context * ctx, const char * fname) {
     fprintf(stderr, "%s: saving output to '%s'\n", __func__, fname);
 
     const int n_segments = whisper_full_n_segments(ctx);
-    fout << "start,end,text\n";
     for (int i = 0; i < n_segments; ++i) {
         const char * text = whisper_full_get_segment_text(ctx, i);
         const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
         const int64_t t1 = whisper_full_get_segment_t1(ctx, i);
 
         //need to multiply times returned from whisper_full_get_segment_t{0,1}() by 10 to get milliseconds.
-        fout << 10 * t0 << "," << 10 * t1 << ",\"" << text << "\"\n";
+        fout << 10 * t0 << ", " << 10 * t1 << ", \"" << text << "\"\n";
     }
 
     return true;
@@ -598,7 +597,7 @@ int main(int argc, char ** argv) {
 
     // this callback is called on each new segment
     if (!wparams.print_realtime) {
-        wparams.new_segment_callback           = whisper_print_segment_callback;
+        wparams.new_segment_callback           = whisper_print_segment;
         wparams.new_segment_callback_user_data = &user_data;
     }
 
models/download-coreml-model.sh (deleted file)

@@ -1,82 +0,0 @@
#!/bin/bash

# This script downloads Whisper model files that have already been converted to Core ML format.
# This way you don't have to convert them yourself.

src="https://huggingface.co/datasets/ggerganov/whisper.cpp-coreml"
pfx="resolve/main/ggml"

# get the path of this script
function get_script_path() {
    if [ -x "$(command -v realpath)" ]; then
        echo "$(dirname $(realpath $0))"
    else
        local ret="$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P)"
        echo "$ret"
    fi
}

models_path="$(get_script_path)"

# Whisper models
models=( "tiny.en" "tiny" "base.en" "base" "small.en" "small" "medium.en" "medium" "large-v1" "large" )

# list available models
function list_models {
    printf "\n"
    printf "  Available models:"
    for model in "${models[@]}"; do
        printf " $model"
    done
    printf "\n\n"
}

if [ "$#" -ne 1 ]; then
    printf "Usage: $0 <model>\n"
    list_models

    exit 1
fi

model=$1

if [[ ! " ${models[@]} " =~ " ${model} " ]]; then
    printf "Invalid model: $model\n"
    list_models

    exit 1
fi

# download Core ML model

printf "Downloading Core ML model $model from '$src' ...\n"

cd $models_path

if [ -f "ggml-$model.mlmodel" ]; then
    printf "Model $model already exists. Skipping download.\n"
    exit 0
fi

if [ -x "$(command -v wget)" ]; then
    wget --quiet --show-progress -O ggml-$model.mlmodel $src/$pfx-$model.mlmodel
elif [ -x "$(command -v curl)" ]; then
    curl -L --output ggml-$model.mlmodel $src/$pfx-$model.mlmodel
else
    printf "Either wget or curl is required to download models.\n"
    exit 1
fi

if [ $? -ne 0 ]; then
    printf "Failed to download Core ML model $model \n"
    printf "Please try again later or download the original Whisper model files and convert them yourself.\n"
    exit 1
fi

printf "Done! Model '$model' saved in 'models/ggml-$model.mlmodel'\n"
printf "Run the following command to compile it:\n\n"
printf "  $ xcrun coremlc compile ./models/ggml-$model.mlmodel ./models\n\n"
printf "You can now use it like this:\n\n"
printf "  $ ./main -m models/ggml-$model.bin -f samples/jfk.wav\n"
printf "\n"
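Note the apparent mismatch in the deleted script's final hints: it downloads ggml-$model.mlmodel and compiles it with xcrun coremlc, yet the run command still points at models/ggml-$model.bin. That is deliberate: per whisper_get_coreml_path in the whisper.cpp hunk below, the library derives the .mlmodelc path from the .bin path at load time, so both files must sit side by side under models/.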
whisper.cpp (66 changed lines)

@@ -1,8 +1,5 @@
 #define WHISPER_BUILD
 #include "whisper.h"
-#if WHISPER_USE_COREML
-#include "coreml/whisper-encoder.h"
-#endif
 
 #include "ggml.h"
 
@@ -595,21 +592,16 @@ struct whisper_context {
 
     mutable std::mt19937 rng; // used for sampling at t > 0.0
 
-    int lang_id = 0; // english by default
-
-    std::string path_model; // populated by whisper_init_from_file()
-#ifdef WHISPER_USE_COREML
-    whisper_coreml_context * ctx_coreml;
-#endif
+    int lang_id;
 
     // [EXPERIMENTAL] token-level timestamps data
-    int64_t t_beg = 0;
-    int64_t t_last = 0;
+    int64_t t_beg;
+    int64_t t_last;
     whisper_token tid_last;
     std::vector<float> energy; // PCM signal energy
 
     // [EXPERIMENTAL] speed-up techniques
-    int32_t exp_n_audio_ctx = 0; // 0 - use default
+    int32_t exp_n_audio_ctx; // 0 - use default
 
     void use_buf(struct ggml_context * ctx, int i) {
 #if defined(WHISPER_USE_SCRATCH)
@@ -1405,7 +1397,6 @@ static bool whisper_encode(
         }
     }
 
-#ifndef WHISPER_USE_COREML
     struct ggml_tensor * cur;
 
     // convolution + gelu
@@ -1714,13 +1705,6 @@ static bool whisper_encode(
 
         //ggml_graph_print(&gf);
     }
-#else
-    wctx.use_buf(ctx0, -1);
-
-    struct ggml_tensor * cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_state, n_ctx);
-
-    whisper_coreml_encode(wctx.ctx_coreml, (float *) mel->data, (float *) cur->data);
-#endif
 
     // cur
     //{
@@ -2523,20 +2507,6 @@ static std::vector<whisper_vocab::id> tokenize(const whisper_vocab & vocab, cons
 // interface implementation
 //
 
-#ifdef WHISPER_USE_COREML
-// replace .bin with .mlmodelc
-static std::string whisper_get_coreml_path(std::string path_bin) {
-    auto pos = path_bin.rfind('.');
-    if (pos != std::string::npos) {
-        path_bin = path_bin.substr(0, pos);
-    }
-
-    path_bin += ".mlmodelc";
-
-    return path_bin;
-}
-#endif
-
 struct whisper_context * whisper_init_from_file(const char * path_model) {
     whisper_model_loader loader = {};
 
@@ -2549,7 +2519,6 @@ struct whisper_context * whisper_init_from_file(const char * path_model) {
     }
 
     loader.context = &fin;
-
     loader.read = [](void * ctx, void * output, size_t read_size) {
         std::ifstream * fin = (std::ifstream*)ctx;
         fin->read((char *)output, read_size);
@@ -2566,26 +2535,7 @@ struct whisper_context * whisper_init_from_file(const char * path_model) {
         fin->close();
     };
 
-    auto ctx = whisper_init(&loader);
-
-    if (ctx) {
-        ctx->path_model = path_model;
-#ifdef WHISPER_USE_COREML
-        const auto path_coreml = whisper_get_coreml_path(ctx->path_model);
-        fprintf(stderr, "%s: loading Core ML model from '%s'\n", __func__, path_coreml.c_str());
-        fprintf(stderr, "%s: first run on a device may take a while ...\n", __func__);
-
-        ctx->ctx_coreml = whisper_coreml_init(path_coreml.c_str());
-        if (!ctx->ctx_coreml) {
-            fprintf(stderr, "%s: failed to load Core ML model from '%s'\n", __func__, path_coreml.c_str());
-            return nullptr;
-        }
-
-        fprintf(stderr, "%s: Core ML model loaded\n", __func__);
-#endif
-    }
-
-    return ctx;
+    return whisper_init(&loader);
 }
 
 struct whisper_context * whisper_init_from_buffer(void * buffer, size_t buffer_size) {
@@ -2657,10 +2607,6 @@ void whisper_free(struct whisper_context * ctx) {
                 ggml_free(ctx->decoders[i].kv_self.ctx);
             }
         }
-#ifdef WHISPER_USE_COREML
-        whisper_coreml_free(ctx->ctx_coreml);
-        ctx->ctx_coreml = nullptr;
-#endif
         delete ctx;
     }
 }
@@ -4393,7 +4339,7 @@ int whisper_full_n_segments(struct whisper_context * ctx) {
 }
 
 int whisper_full_lang_id(struct whisper_context * ctx) {
-    return ctx->lang_id;
+    return ctx->lang_id;
 }
 
 int64_t whisper_full_get_segment_t0(struct whisper_context * ctx, int i_segment) {
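A worked example of the path mapping implemented by the deleted whisper_get_coreml_path helper, restated here for illustration (the file name in the assert is hypothetical):

#include <cassert>
#include <string>

// same logic as the deleted whisper_get_coreml_path in the hunk above
static std::string coreml_path(std::string path_bin) {
    auto pos = path_bin.rfind('.');
    if (pos != std::string::npos) {
        path_bin = path_bin.substr(0, pos);
    }
    return path_bin + ".mlmodelc";
}

int main() {
    // the last extension is swapped, everything else is preserved
    assert(coreml_path("models/ggml-base.en.bin") == "models/ggml-base.en.mlmodelc");
    return 0;
}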