Mirror of https://github.com/ggerganov/whisper.cpp.git (synced 2025-06-30 22:40:14 +02:00)
ggml : sync latest ggml lib
@@ -26,8 +26,9 @@ struct gpt_params {
     int32_t n_batch = 8; // batch size for prompt processing
 
-    std::string model = "models/gpt-2-117M/ggml-model.bin"; // model path
-    std::string prompt;
+    std::string model = "models/gpt-2-117M/ggml-model.bin"; // model path
+    std::string prompt = "";
+    std::string token_test = "";
 };
 
 bool gpt_params_parse(int argc, char ** argv, gpt_params & params);
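For context, a minimal sketch of how the struct and gpt_params_parse above are typically driven from main(); this program is an illustration only and is not part of the commit:

// Hypothetical usage sketch: parse the command line into gpt_params and read
// the fields declared above. The printed labels are assumptions.
#include "common.h"

#include <cstdio>

int main(int argc, char ** argv) {
    gpt_params params; // defaults: model path above, prompt = "", token_test = ""

    if (!gpt_params_parse(argc, argv, params)) {
        return 1; // unknown or malformed argument
    }

    std::printf("model      : %s\n", params.model.c_str());
    std::printf("prompt     : %s\n", params.prompt.c_str());
    std::printf("token_test : %s\n", params.token_test.c_str());

    return 0;
}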
@@ -61,6 +62,12 @@ struct gpt_vocab {
 // poor-man's JSON parsing
 std::map<std::string, int32_t> json_parse(const std::string & fname);
 
+std::string convert_to_utf8(const std::wstring & input);
+
+std::wstring convert_to_wstring(const std::string & input);
+
+void gpt_split_words(std::string str, std::vector<std::string>& words);
+
 // split text into tokens
 //
 // ref: https://github.com/openai/gpt-2/blob/a74da5d99abaaba920de8131d64da2862a8f213b/src/encoder.py#L53
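The two string-conversion helpers and gpt_split_words are only declared in this hunk; below is a rough sketch of how they could be combined, with their exact behavior assumed from the names rather than taken from this diff:

// Assumed usage of the helpers declared above: round-trip a UTF-8 string
// through std::wstring and split it into words.
#include "common.h"

#include <cstdio>
#include <string>
#include <vector>

int main() {
    const std::string text = "hello tokenizer world";

    const std::wstring wide = convert_to_wstring(text); // UTF-8 -> wide string
    const std::string  back = convert_to_utf8(wide);    // wide string -> UTF-8

    std::vector<std::string> words;
    gpt_split_words(back, words); // fills 'words' with the pieces of 'back' (assumed semantics)

    for (const auto & w : words) {
        std::printf("word: '%s'\n", w.c_str());
    }

    return 0;
}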
@@ -73,6 +80,15 @@ std::map<std::string, int32_t> json_parse(const std::string & fname);
 //
 std::vector<gpt_vocab::id> gpt_tokenize(const gpt_vocab & vocab, const std::string & text);
 
+// test outputs of gpt_tokenize
+//
+// - compare with tokens generated by the huggingface tokenizer
+// - test cases are chosen based on the model's main language (under 'prompt' directory)
+// - if all sentences are tokenized identically, print 'All tests passed.'
+// - otherwise, print sentence, huggingface tokens, ggml tokens
+//
+void test_gpt_tokenizer(gpt_vocab & vocab, const std::string & fpath_test);
+
 // load the tokens from encoder.json
 bool gpt_vocab_init(const std::string & fname, gpt_vocab & vocab);
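A minimal sketch of exercising the tokenizer API above: load a vocabulary, tokenize a prompt, then run the new tokenizer test. Both file paths are hypothetical placeholders, not taken from this diff:

// Hypothetical driver for the tokenizer declarations above.
#include "common.h"

#include <cstdio>
#include <vector>

int main() {
    gpt_vocab vocab;
    if (!gpt_vocab_init("models/gpt-2-117M/encoder.json", vocab)) { // path is an assumption
        return 1;
    }

    // split a prompt into token ids
    const std::vector<gpt_vocab::id> tokens = gpt_tokenize(vocab, "hello world");
    for (const auto id : tokens) {
        std::printf("%d ", id);
    }
    std::printf("\n");

    // compare against tokens produced by the huggingface tokenizer,
    // as described for test_gpt_tokenizer above
    test_gpt_tokenizer(vocab, "prompts/test-cases.txt"); // path is an assumption

    return 0;
}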
@@ -92,6 +108,18 @@ gpt_vocab::id gpt_sample_top_k_top_p(
         double temp,
         std::mt19937 & rng);
 
+gpt_vocab::id gpt_sample_top_k_top_p_repeat(
+        const gpt_vocab & vocab,
+        const float * logits,
+        const int32_t * last_n_tokens_data,
+        size_t last_n_tokens_data_size,
+        int top_k,
+        double top_p,
+        double temp,
+        int repeat_last_n,
+        float repeat_penalty,
+        std::mt19937 & rng);
+
 //
 // Audio utils
 //
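The new gpt_sample_top_k_top_p_repeat entry point extends the existing top-k/top-p sampler with a repetition window and penalty. Below is a hedged sketch of a call site inside a generation loop; the wrapper function, the logits source, and the parameter values are chosen for illustration and are not part of this commit:

// Assumed call site for the sampler declared above: keep a sliding window of
// recent tokens so the sampler can down-weight repetition.
#include "common.h"

#include <cstdint>
#include <random>
#include <vector>

gpt_vocab::id sample_next_token(const gpt_vocab & vocab,
                                const std::vector<float> & logits,     // one score per vocab entry
                                std::vector<int32_t> & last_n_tokens,  // recent history, newest last
                                std::mt19937 & rng) {
    const int   repeat_last_n  = 64;   // how many recent tokens to penalize (assumed value)
    const float repeat_penalty = 1.1f; // > 1.0 discourages repetition (assumed value)

    const gpt_vocab::id id = gpt_sample_top_k_top_p_repeat(
            vocab,
            logits.data(),
            last_n_tokens.data(),
            last_n_tokens.size(),
            /*top_k =*/ 40,
            /*top_p =*/ 0.90,
            /*temp  =*/ 0.80,
            repeat_last_n,
            repeat_penalty,
            rng);

    // slide the window of recent tokens
    last_n_tokens.push_back(id);
    if ((int) last_n_tokens.size() > repeat_last_n) {
        last_n_tokens.erase(last_n_tokens.begin());
    }

    return id;
}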