Mirror of https://github.com/ggerganov/whisper.cpp.git, synced 2025-02-03 11:59:25 +01:00

talk-llama : sync llama.cpp

parent: 5089ab2d6a
commit: 24d706774d
@@ -113,7 +113,7 @@ static void llama_sampler_softmax_impl(llama_token_data_array * cur_p) {
 }
 
 static void llama_sampler_top_k_impl(llama_token_data_array * cur_p, int32_t k) {
-    // TODO: move bucket sort to separate function so that top_p/tail_free/typical/softmax first is equally fast
+    // TODO: move bucket sort to separate function so that top_p/typical/softmax first is equally fast
     // if (k >= (int32_t)cur_p->size) {
     //     return;
     // }
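The TODO above concerns top-k truncation: keep the k most probable candidates without paying for a full comparison sort of the whole vocabulary (the real code uses a bucket sort). As a rough illustration of the operation being optimized, here is a standalone sketch against a hypothetical token_data type, not the actual llama_token_data_array:

#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for llama_token_data: id, raw logit, probability.
struct token_data {
    int32_t id;
    float   logit;
    float   p;
};

// Keep only the k highest-logit candidates.
static void top_k_truncate(std::vector<token_data> & cur, int32_t k) {
    if (k >= (int32_t) cur.size()) {
        return; // nothing to do, mirrors the commented-out early exit above
    }
    std::partial_sort(cur.begin(), cur.begin() + k, cur.end(),
        [](const token_data & a, const token_data & b) {
            return a.logit > b.logit;
        });
    cur.resize(k);
}

std::partial_sort is O(n log k); the bucket sort mentioned in the TODO avoids even that for large vocabularies.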
@@ -733,101 +733,6 @@ struct llama_sampler * llama_sampler_init_min_p(float p, size_t min_keep) {
     };
 }
 
-// tail-free
-
-struct llama_sampler_tail_free {
-    const float  z;
-    const size_t min_keep;
-};
-
-static const char * llama_sampler_tail_free_name(const struct llama_sampler * /*smpl*/) {
-    return "tail-free";
-}
-
-static void llama_sampler_tail_free_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
-    const auto * ctx = (llama_sampler_tail_free *) smpl->ctx;
-
-    if (ctx->z >= 1.0f || cur_p->size <= 2) {
-        return;
-    }
-
-    llama_sampler_softmax_impl(cur_p);
-
-    // Compute the first and second derivatives
-    std::vector<float> first_derivatives(cur_p->size - 1);
-    std::vector<float> second_derivatives(cur_p->size - 2);
-
-    for (size_t i = 0; i < first_derivatives.size(); ++i) {
-        first_derivatives[i] = cur_p->data[i].p - cur_p->data[i + 1].p;
-    }
-    for (size_t i = 0; i < second_derivatives.size(); ++i) {
-        second_derivatives[i] = first_derivatives[i] - first_derivatives[i + 1];
-    }
-
-    // Calculate absolute value of second derivatives
-    for (size_t i = 0; i < second_derivatives.size(); ++i) {
-        second_derivatives[i] = std::abs(second_derivatives[i]);
-    }
-
-    // Normalize the second derivatives
-    {
-        const float second_derivatives_sum = std::accumulate(second_derivatives.begin(), second_derivatives.end(), 0.0f);
-
-        if (second_derivatives_sum > 1e-6f) {
-            for (float & value : second_derivatives) {
-                value /= second_derivatives_sum;
-            }
-        } else {
-            for (float & value : second_derivatives) {
-                value = 1.0f / second_derivatives.size();
-            }
-        }
-    }
-
-    float cum_sum = 0.0f;
-    size_t last_idx = cur_p->size;
-    for (size_t i = 0; i < second_derivatives.size(); ++i) {
-        cum_sum += second_derivatives[i];
-
-        // Check if the running sum is greater than z or if we have kept at least min_keep tokens
-        if (cum_sum > ctx->z && i >= ctx->min_keep) {
-            last_idx = i;
-            break;
-        }
-    }
-
-    // Resize the output vector to keep only the tokens above the tail location
-    cur_p->size = last_idx;
-}
-
-static struct llama_sampler * llama_sampler_tail_free_clone(const struct llama_sampler * smpl) {
-    const auto * ctx = (const llama_sampler_tail_free *) smpl->ctx;
-    return llama_sampler_init_tail_free(ctx->z, ctx->min_keep);
-}
-
-static void llama_sampler_tail_free_free(struct llama_sampler * smpl) {
-    delete (llama_sampler_tail_free *) smpl->ctx;
-}
-
-static struct llama_sampler_i llama_sampler_tail_free_i = {
-    /* .name   = */ llama_sampler_tail_free_name,
-    /* .accept = */ nullptr,
-    /* .apply  = */ llama_sampler_tail_free_apply,
-    /* .reset  = */ nullptr,
-    /* .clone  = */ llama_sampler_tail_free_clone,
-    /* .free   = */ llama_sampler_tail_free_free,
-};
-
-struct llama_sampler * llama_sampler_init_tail_free(float z, size_t min_keep) {
-    return new llama_sampler {
-        /* .iface    = */ &llama_sampler_tail_free_i,
-        /* .ctx      = */ new llama_sampler_tail_free {
-            /* .z        = */ z,
-            /* .min_keep = */ min_keep,
-        },
-    };
-}
-
 // typical
 
 struct llama_sampler_typical {
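This sync removes tail-free sampling entirely, following its removal upstream in llama.cpp. The removed code finds the "tail" of the sorted probability distribution by taking the normalized absolute second derivative of the probability curve and cutting once its cumulative sum exceeds z. For downstream users who still want the behavior, here is a self-contained sketch of the same math on a plain probability vector, assuming the probabilities are already softmax-normalized and sorted in descending order (which llama_sampler_softmax_impl guarantees in the original):

#include <cmath>
#include <cstddef>
#include <numeric>
#include <vector>

// Returns how many of the (sorted, normalized) probabilities to keep under
// tail-free sampling with parameter z; mirrors the removed
// llama_sampler_tail_free_apply above.
static size_t tail_free_cutoff(const std::vector<float> & probs, float z, size_t min_keep) {
    if (z >= 1.0f || probs.size() <= 2) {
        return probs.size();
    }

    // absolute second derivative of the sorted probability curve
    std::vector<float> d2(probs.size() - 2);
    for (size_t i = 0; i < d2.size(); ++i) {
        const float d1a = probs[i]     - probs[i + 1];
        const float d1b = probs[i + 1] - probs[i + 2];
        d2[i] = std::fabs(d1a - d1b);
    }

    // normalize so the cumulative sum is comparable to z
    const float sum = std::accumulate(d2.begin(), d2.end(), 0.0f);
    if (sum > 1e-6f) {
        for (float & v : d2) { v /= sum; }
    } else {
        for (float & v : d2) { v = 1.0f / d2.size(); }
    }

    float cum_sum = 0.0f;
    for (size_t i = 0; i < d2.size(); ++i) {
        cum_sum += d2[i];
        if (cum_sum > z && i >= min_keep) {
            return i; // cut the tail here
        }
    }
    return probs.size();
}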
@@ -1971,8 +1876,11 @@ static void llama_sampler_dry_reset(struct llama_sampler * smpl) {
 
 static struct llama_sampler * llama_sampler_dry_clone(const struct llama_sampler * smpl) {
     const auto * ctx = (llama_sampler_dry *) smpl->ctx;
 
-    // nullptr is passed as vocab because it is only needed for raw sequence breaker processing, which we have already done and will be copying
-    auto * result = llama_sampler_init_dry(nullptr, ctx->dry_multiplier, ctx->dry_base, ctx->dry_allowed_length, ctx->dry_penalty_last_n, NULL, 0);
+    llama_vocab dummy_vocab;
+
+    // dummy vocab is passed because it is only needed for raw sequence breaker processing, which we have already done and will simply be copying
+    auto * result = llama_sampler_init_dry_impl(dummy_vocab, ctx->total_context_size, ctx->dry_multiplier, ctx->dry_base, ctx->dry_allowed_length, ctx->dry_penalty_last_n, NULL, 0);
 
     // Copy the state, including the processed breakers
     {
         auto * result_ctx = (llama_sampler_dry *) result->ctx;
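The rewritten clone above follows the pattern used throughout these samplers: construct a fresh sampler through an init path, then directly copy over state that is expensive to recompute (here, the processed sequence breakers). A generic sketch of that pattern with purely hypothetical types, not the actual llama.cpp structures:

#include <vector>

// Hypothetical sampler context holding both cheap config and processed state.
struct my_sampler_ctx {
    float            multiplier;          // config: re-derivable
    std::vector<int> processed_breakers;  // state: expensive to recompute
};

static my_sampler_ctx * my_sampler_init(float multiplier) {
    return new my_sampler_ctx { multiplier, {} };
}

static my_sampler_ctx * my_sampler_clone(const my_sampler_ctx * src) {
    // 1) build a fresh instance from the config alone...
    my_sampler_ctx * dst = my_sampler_init(src->multiplier);
    // 2) ...then copy the processed state, skipping reprocessing
    dst->processed_breakers = src->processed_breakers;
    return dst;
}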
(File diff suppressed because it is too large.)
@@ -2,6 +2,7 @@
 #define LLAMA_H
 
 #include "ggml.h"
+#include "ggml-cpu.h"
 #include "ggml-backend.h"
 
 #include <stddef.h>
@@ -205,7 +206,7 @@ extern "C" {
     enum llama_split_mode {
         LLAMA_SPLIT_MODE_NONE  = 0, // single GPU
         LLAMA_SPLIT_MODE_LAYER = 1, // split layers and KV across GPUs
-        LLAMA_SPLIT_MODE_ROW   = 2, // split rows across GPUs
+        LLAMA_SPLIT_MODE_ROW   = 2, // split layers and KV across GPUs, use tensor parallelism if supported
     };
 
     // TODO: simplify (https://github.com/ggerganov/llama.cpp/pull/9294#pullrequestreview-2286561979)
@@ -274,10 +275,7 @@ extern "C" {
         int32_t n_gpu_layers; // number of layers to store in VRAM
         enum llama_split_mode split_mode; // how to split the model across multiple GPUs
 
-        // main_gpu interpretation depends on split_mode:
-        // LLAMA_SPLIT_MODE_NONE: the GPU that is used for the entire model
-        // LLAMA_SPLIT_MODE_ROW: the GPU that is used for small tensors and intermediate results
-        // LLAMA_SPLIT_MODE_LAYER: ignored
+        // the GPU that is used for the entire model when split_mode is LLAMA_SPLIT_MODE_NONE
         int32_t main_gpu;
 
         // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices()
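For downstream users of this header, a minimal sketch of how these llama_model_params fields are typically filled in when loading a model; the model path and layer count are placeholders, not recommendations:

#include "llama.h"

int main() {
    llama_model_params mparams = llama_model_default_params();

    mparams.n_gpu_layers = 99;                     // offload all layers (placeholder value)
    mparams.split_mode   = LLAMA_SPLIT_MODE_LAYER; // split layers and KV across GPUs
    mparams.main_gpu     = 0;                      // per the new comment, only consulted for LLAMA_SPLIT_MODE_NONE

    llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    if (model == nullptr) {
        return 1;
    }

    llama_free_model(model);
    return 0;
}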
@@ -799,7 +797,7 @@ extern "C" {
     // Processes a batch of tokens with the ecoder part of the encoder-decoder model.
     // Stores the encoder output internally for later use by the decoder cross-attention layers.
     //   0 - success
-    // < 0 - error
+    // < 0 - error. the KV cache state is restored to the state before this call
     LLAMA_API int32_t llama_encode(
             struct llama_context * ctx,
             struct llama_batch batch);
@@ -807,7 +805,7 @@ extern "C" {
     // Positive return values does not mean a fatal error, but rather a warning.
     //   0 - success
     //   1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
-    // < 0 - error
+    // < 0 - error. the KV cache state is restored to the state before this call
     LLAMA_API int32_t llama_decode(
             struct llama_context * ctx,
             struct llama_batch batch);
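A small sketch of handling these documented return codes at a llama_decode call site; it assumes the context and batch were created elsewhere:

#include <stdio.h>
#include "llama.h"

// Returns true on success; ctx and batch are prepared by the caller.
static bool decode_checked(struct llama_context * ctx, struct llama_batch batch) {
    const int32_t ret = llama_decode(ctx, batch);
    if (ret == 0) {
        return true; // success
    }
    if (ret == 1) {
        // warning, not fatal: no KV slot found; per the header, retry with a
        // smaller batch or a larger context
        fprintf(stderr, "llama_decode: no KV slot for batch\n");
        return false;
    }
    // ret < 0: error; per the new comment, the KV cache state was restored
    // to the state before this call, so the context remains usable
    fprintf(stderr, "llama_decode: error %d\n", ret);
    return false;
}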
@@ -1087,9 +1085,6 @@ extern "C" {
     /// @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841
     LLAMA_API struct llama_sampler * llama_sampler_init_min_p     (float p, size_t min_keep);
 
-    /// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
-    LLAMA_API struct llama_sampler * llama_sampler_init_tail_free (float z, size_t min_keep);
-
     /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
     LLAMA_API struct llama_sampler * llama_sampler_init_typical   (float p, size_t min_keep);
 
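Callers that previously inserted llama_sampler_init_tail_free into a sampler chain now need to drop or substitute that stage. A minimal sketch of a chain built only from samplers that remain in this header; all parameter values are illustrative:

#include "llama.h"

// Build a sampling chain without the removed tail-free stage.
static struct llama_sampler * make_chain(void) {
    struct llama_sampler * chain = llama_sampler_chain_init(llama_sampler_chain_default_params());

    llama_sampler_chain_add(chain, llama_sampler_init_top_k(40));
    llama_sampler_chain_add(chain, llama_sampler_init_min_p(0.05f, 1)); // one possible stand-in for a former tail_free stage
    llama_sampler_chain_add(chain, llama_sampler_init_typical(1.0f, 1));
    llama_sampler_chain_add(chain, llama_sampler_init_temp(0.8f));
    llama_sampler_chain_add(chain, llama_sampler_init_dist(LLAMA_DEFAULT_SEED));

    return chain; // release later with llama_sampler_free(chain)
}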