#include "llama-kv-cache-recurrent.h"
|
|
|
|
#include "llama-impl.h"
|
|
#include "llama-batch.h"
|
|
#include "llama-model.h"
|
|
|
|
#include <algorithm>
|
|
#include <cassert>
|
|
#include <limits>
|
|
#include <map>
|
|
#include <stdexcept>
|
|
|
|
//
// llama_kv_cache_recurrent
//

llama_kv_cache_recurrent::llama_kv_cache_recurrent(
        const llama_model & model,
        ggml_type           type_k,
        ggml_type           type_v,
        bool                offload,
        uint32_t            kv_size,
        uint32_t            n_seq_max) : hparams(model.hparams), n_seq_max(n_seq_max) {
    const int32_t n_layer = hparams.n_layer;

    LLAMA_LOG_INFO("%s: kv_size = %u, n_seq_max = %u, type_k = '%s', type_v = '%s', n_layer = %d\n",
            __func__, kv_size, n_seq_max, ggml_type_name(type_k), ggml_type_name(type_v), n_layer);

    head = 0;
    size = kv_size;
    used = 0;

    cells.clear();
    cells.resize(kv_size);

    // create a context for each buffer type
    std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
        auto it = ctx_map.find(buft);
        if (it == ctx_map.end()) {
            ggml_init_params params = {
                /*.mem_size   =*/ size_t(2u*n_layer*ggml_tensor_overhead()),
                /*.mem_buffer =*/ NULL,
                /*.no_alloc   =*/ true,
            };

            ggml_context * ctx = ggml_init(params);
            if (!ctx) {
                return nullptr;
            }

            ctx_map[buft] = ctx;
            ctxs.emplace_back(ctx);

            return ctx;
        }

        return it->second;
    };

    k_l.reserve(n_layer);
    v_l.reserve(n_layer);

    for (int i = 0; i < n_layer; i++) {
        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s();
        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s();

        const char * dev_name = "CPU";

        ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type();

        if (offload) {
            auto * dev = model.dev_layer(i);
            buft = ggml_backend_dev_buffer_type(dev);

            dev_name = ggml_backend_dev_name(dev);
        }

        LLAMA_LOG_DEBUG("%s, layer %3d: dev = %s\n", __func__, i, dev_name);

        ggml_context * ctx = ctx_for_buft(buft);
        if (!ctx) {
            throw std::runtime_error("failed to create ggml context for kv cache");
        }

        ggml_tensor * k = ggml_new_tensor_1d(ctx, type_k, n_embd_k_gqa*kv_size);
        ggml_tensor * v = ggml_new_tensor_1d(ctx, type_v, n_embd_v_gqa*kv_size);
        ggml_format_name(k, "cache_k_l%d", i);
        ggml_format_name(v, "cache_v_l%d", i);
        k_l.push_back(k);
        v_l.push_back(v);
    }

    // allocate tensors and initialize the buffers to avoid NaNs in the padding
    for (auto it : ctx_map) {
        auto * buft = it.first;
        auto * ctx  = it.second;

        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
        if (!buf) {
            throw std::runtime_error("failed to allocate buffer for kv cache");
        }
        ggml_backend_buffer_clear(buf, 0);
        LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
        bufs.emplace_back(buf);
    }

    {
        const size_t memory_size_k = size_k_bytes();
        const size_t memory_size_v = size_v_bytes();

        LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
                (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f),
                ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
                ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
    }
}

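// Example (hypothetical usage sketch, not part of the upstream sources): a
// recurrent cache keeps one state cell per sequence, so kv_size is typically
// chosen equal to n_seq_max rather than to a context length. The tensor types
// below are illustrative assumptions only.
//
//     llama_kv_cache_recurrent cache(
//         model,
//         GGML_TYPE_F32,     // type_k: conv/shift states
//         GGML_TYPE_F32,     // type_v: recurrent/ssm states
//         /*offload   =*/ true,
//         /*kv_size   =*/ 8,
//         /*n_seq_max =*/ 8);
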
void llama_kv_cache_recurrent::clear() {
    for (int32_t i = 0; i < (int32_t) size; ++i) {
        cells[i].pos = -1;
        cells[i].seq_id.clear();
        cells[i].src = -1;
        cells[i].tail = -1;
    }
    head = 0;
    used = 0;

    for (auto & buf : bufs) {
        ggml_backend_buffer_clear(buf.get(), 0);
    }
}

bool llama_kv_cache_recurrent::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
    uint32_t new_head = size;

    if (p0 < 0) {
        p0 = 0;
    }

    if (p1 < 0) {
        p1 = std::numeric_limits<llama_pos>::max();
    }

    // models like Mamba or RWKV can't have a state partially erased
    if (seq_id >= (int64_t) size) {
        // could be fatal
        return false;
    }
    if (0 <= seq_id) {
        int32_t & tail_id = cells[seq_id].tail;
        if (tail_id >= 0) {
            const kv_cell & cell = cells[tail_id];
            // partial intersection is invalid
            if ((0 < p0 && p0 <= cell.pos) || (0 < p1 && p1 <= cell.pos)) {
                return false;
            }
            // invalidate tails which will be cleared
            if (p0 <= cell.pos && cell.pos < p1) {
                tail_id = -1;
            }
        }
    } else {
        // when seq_id is negative, the range should include everything or nothing
        if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits<llama_pos>::max())) {
            return false;
        }
    }

    for (uint32_t i = 0; i < size; ++i) {
        if (cells[i].pos >= p0 && cells[i].pos < p1) {
            if (seq_id < 0) {
                cells[i].seq_id.clear();
            } else if (cells[i].has_seq_id(seq_id)) {
                cells[i].seq_id.erase(seq_id);
            } else {
                continue;
            }
            if (cells[i].is_empty()) {
                // keep count of the number of used cells
                if (cells[i].pos >= 0) {
                    used--;
                }
                cells[i].pos = -1;
                cells[i].src = -1;
                if (new_head == size) {
                    new_head = i;
                }
            }
        }
    }

    // If we freed up a slot, set head to it so searching can start there.
    if (new_head != size && new_head < head) {
        head = new_head;
    }

    return true;
}

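// Illustration (hypothetical call sites, following the all-or-nothing check
// above): a recurrent state cannot be truncated to a prefix, so partial ranges
// over a live state are rejected while full removals succeed.
//
//     cache.seq_rm(0, -1, -1);   // ok: erase the whole state of sequence 0
//     cache.seq_rm(0,  0, 16);   // fails if sequence 0's tail cell has pos >= 16
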
void llama_kv_cache_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
    if (seq_id_src == seq_id_dst) {
        return;
    }

    if (p0 < 0) {
        p0 = 0;
    }

    if (p1 < 0) {
        p1 = std::numeric_limits<llama_pos>::max();
    }

    if ((uint32_t) seq_id_dst < size && (uint32_t) seq_id_src < size) {
        kv_cell & tail_src = cells[seq_id_src];
        kv_cell & tail_dst = cells[seq_id_dst];
        if (tail_dst.tail >= 0) {
            // clear destination seq_id if it wasn't empty
            kv_cell & cell_dst = cells[tail_dst.tail];

            cell_dst.seq_id.erase(seq_id_dst);
            tail_dst.tail = -1;
            if (cell_dst.seq_id.empty()) {
                cell_dst.pos = -1;
                cell_dst.src = -1;
                used -= 1;
            }
        }
        if (tail_src.tail >= 0) {
            kv_cell & cell_src = cells[tail_src.tail];

            cell_src.seq_id.insert(seq_id_dst);
            tail_dst.tail = tail_src.tail;
        }
    }
}

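// Note (editorial sketch of the tail bookkeeping above): seq_cp does not copy
// any tensor data. The destination sequence simply starts sharing the source's
// tail cell; the state is forked lazily, when one of the sequences is next
// written to via find_slot() and no longer "owns" the shared cell.
//
//     cache.seq_cp(/*src =*/ 0, /*dst =*/ 1, -1, -1);  // seq 1 now aliases seq 0's state
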
void llama_kv_cache_recurrent::seq_keep(llama_seq_id seq_id) {
    uint32_t new_head = size;

    for (uint32_t i = 0; i < size; ++i) {
        if ((llama_seq_id) i != seq_id) {
            cells[i].tail = -1;
        }

        if (!cells[i].has_seq_id(seq_id)) {
            if (cells[i].pos >= 0) {
                used--;
            }

            cells[i].pos = -1;
            cells[i].src = -1;
            cells[i].seq_id.clear();

            if (new_head == size) {
                new_head = i;
            }
        } else {
            cells[i].seq_id.clear();
            cells[i].seq_id.insert(seq_id);
        }
    }

    // If we freed up a slot, set head to it so searching can start there.
    if (new_head != size && new_head < head) {
        head = new_head;
    }
}

void llama_kv_cache_recurrent::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
    if (shift == 0) {
        return;
    }

    if (p0 < 0) {
        p0 = 0;
    }

    if (p1 < 0) {
        p1 = std::numeric_limits<llama_pos>::max();
    }

    // If there is no range then return early to avoid looping over the cache.
    if (p0 == p1) {
        return;
    }

    // for Mamba-like or RWKV models, only the pos needs to be shifted
    if (0 <= seq_id && seq_id < (int64_t) size) {
        const int32_t tail_id = cells[seq_id].tail;
        if (tail_id >= 0) {
            kv_cell & cell = cells[tail_id];
            if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
                cell.pos += shift;
            }
        }
    }
}

void llama_kv_cache_recurrent::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
    if (d == 1) {
        return;
    }

    if (p0 < 0) {
        p0 = 0;
    }

    if (p1 < 0) {
        p1 = std::numeric_limits<llama_pos>::max();
    }

    // If there is no range then return early to avoid looping over the cache.
    if (p0 == p1) {
        return;
    }

    // for Mamba-like or RWKV models, only the pos needs to be changed
    if (0 <= seq_id && seq_id < (int64_t) size) {
        const int32_t tail_id = cells[seq_id].tail;
        if (tail_id >= 0) {
            kv_cell & cell = cells[tail_id];
            if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
                cell.pos /= d;
            }
        }
    }
}

llama_pos llama_kv_cache_recurrent::seq_pos_min(llama_seq_id seq_id) const {
    llama_pos result = std::numeric_limits<llama_pos>::max();

    for (uint32_t i = 0; i < size; ++i) {
        if (cells[i].has_seq_id(seq_id)) {
            result = std::min(result, cells[i].pos);
        }
    }

    if (result == std::numeric_limits<llama_pos>::max()) {
        result = -1;
    }

    return result;
}

llama_pos llama_kv_cache_recurrent::seq_pos_max(llama_seq_id seq_id) const {
    llama_pos result = -1;

    for (uint32_t i = 0; i < size; ++i) {
        if (cells[i].has_seq_id(seq_id)) {
            result = std::max(result, cells[i].pos);
        }
    }

    return result;
}

llama_memory_state_ptr llama_kv_cache_recurrent::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_pooled, bool logits_all) {
    auto sbatch = llama_sbatch(batch, hparams.n_embd, false, logits_all);

    std::vector<llama_ubatch> ubatches;

    while (sbatch.n_tokens > 0) {
        llama_ubatch ubatch;

        if (embd_pooled) {
            // Pooled embeddings cannot be split across ubatches (yet)
            ubatch = sbatch.split_seq(n_ubatch);
        } else {
            ubatch = sbatch.split_equal(n_ubatch);
        }

        ubatches.push_back(ubatch);
    }

    if (!prepare(ubatches)) {
        return std::make_unique<llama_kv_cache_recurrent_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
    }

    return std::make_unique<llama_kv_cache_recurrent_state>(LLAMA_MEMORY_STATUS_SUCCESS, this, std::move(sbatch), std::move(ubatches));
}

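// Sketch (hypothetical caller, assuming the llama_memory_state interface used
// above): the state returned by init_batch() is consumed one ubatch at a time,
// with apply() committing the cell updates before each ubatch is computed.
//
//     auto mstate = cache.init_batch(batch, n_ubatch, /*embd_pooled=*/ false, /*logits_all=*/ false);
//     if (mstate->get_status() == LLAMA_MEMORY_STATUS_SUCCESS) {
//         do {
//             mstate->apply();   // find_slot() for the current ubatch
//             // ... build and compute the graph for mstate->get_ubatch() ...
//         } while (mstate->next());
//     }
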
llama_memory_state_ptr llama_kv_cache_recurrent::init_full() {
    return std::make_unique<llama_kv_cache_recurrent_state>(LLAMA_MEMORY_STATUS_SUCCESS, this);
}

bool llama_kv_cache_recurrent::prepare(const std::vector<llama_ubatch> & ubatches) {
    // simply remember the full state because it is very small for this type of cache
    // TODO: optimize
    auto org_cells = cells;
    auto org_used = used;
    auto org_head = head;

    bool success = true;

    // TODO: here we have to verify that all ubatches can fit in the cells
    // however, the current implementation is broken because it relies on s_copy() and s_mask() to update the cells
    // during the compute of each ubatch. to reproduce, uncomment the following loop and run:
    //
    //   $ llama-parallel -m ./mamba-130m/ggml-model-f16.gguf -np 5 -ns 8
    //
    // recovery from failures when the batch does not fit in the KV cache will not work correctly until this is fixed
    //
    GGML_UNUSED(ubatches);
    //for (const auto & ubatch : ubatches) {
    //    if (!find_slot(ubatch)) {
    //        success = false;
    //        break;
    //    }
    //}

    // restore the original state
    cells = std::move(org_cells);
    used = org_used;
    head = org_head;

    return success;
}

bool llama_kv_cache_recurrent::update(llama_context & lctx) {
    GGML_UNUSED(lctx);
    // noop
    return false;
}

void llama_kv_cache_recurrent::defrag_sched(float thold) {
    GGML_UNUSED(thold);
    // noop
}

bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) {
    const uint32_t n_tokens = ubatch.n_tokens;
    const uint32_t n_seqs   = ubatch.n_seqs;

    const uint32_t n_seq_tokens = ubatch.n_seq_tokens;

    // if we have enough unused cells before the current head ->
    //   better to start searching from the beginning of the cache, hoping to fill it
    if (head > used + 2*n_tokens) {
        head = 0;
    }

    // For recurrent state architectures (like Mamba or RWKV),
    // each cache cell can store the state for a whole sequence.
    // A slot should always be contiguous.

    // can only process batches with an equal number of new tokens in each sequence
    GGML_ASSERT(ubatch.equal_seqs);

    int32_t min = size - 1;
    int32_t max = 0;

    // everything should fit if all seq_ids are smaller than the max
    for (uint32_t s = 0; s < n_seqs; ++s) {
        const uint32_t n_seq_id = ubatch.n_seq_id[s];
        for (uint32_t j = 0; j < n_seq_id; ++j) {
            const llama_seq_id seq_id = ubatch.seq_id[s][j];

            if (seq_id < 0 || (uint32_t) seq_id >= size) {
                // too big seq_id
                // TODO: would it be possible to resize the cache instead?
                LLAMA_LOG_ERROR("%s: seq_id=%d >= n_seq_max=%u Try using a bigger --parallel value\n", __func__, seq_id, n_seq_max);
                return false;
            }
            if (j > 0) {
                kv_cell & seq = cells[seq_id];
                if (seq.tail >= 0) {
                    kv_cell & cell = cells[seq.tail];
                    // clear cells from seq_ids that become shared
                    // (should not normally happen, but let's handle it anyway)
                    cell.seq_id.erase(seq_id);
                    seq.tail = -1;
                    if (cell.seq_id.empty()) {
                        cell.pos = -1;
                        cell.src = -1;
                        used -= 1;
                    }
                }
            }
        }
    }

#ifndef NDEBUG
    {
        std::vector<int32_t> tails_verif;
        tails_verif.assign(size, -1);
        for (uint32_t i = 0; i < size; ++i) {
            kv_cell & cell = cells[i];
            for (llama_seq_id seq_id : cell.seq_id) {
                if (tails_verif[seq_id] != -1) {
                    LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]);
                }
                tails_verif[seq_id] = i;
            }
        }
        for (uint32_t i = 0; i < size; ++i) {
            if (tails_verif[i] != cells[i].tail) {
                LLAMA_LOG_ERROR("%s: wrong tail for seq_id %d, (%d instead of %d)\n", __func__, i, cells[i].tail, tails_verif[i]);
            }
        }
    }
#endif

    // find next empty cell
    uint32_t next_empty_cell = head;

    for (uint32_t i = 0; i < size; ++i) {
        if (next_empty_cell >= size) { next_empty_cell -= size; }
        kv_cell & cell = cells[next_empty_cell];
        if (cell.is_empty()) { break; }
        next_empty_cell += 1;
    }

    // find usable cell range
    for (uint32_t s = 0; s < n_seqs; ++s) {
        const llama_seq_id seq_id = ubatch.seq_id[s][0];
        kv_cell & seq_meta = cells[seq_id];
        bool has_cell = false;
        if (seq_meta.tail >= 0) {
            kv_cell & cell = cells[seq_meta.tail];
            GGML_ASSERT(cell.has_seq_id(seq_id));
            // does this seq_id "own" the cell?
            if (cell.seq_id.size() == 1) { has_cell = true; }
        }
        if (!has_cell) {
            kv_cell & empty_cell = cells[next_empty_cell];
            GGML_ASSERT(empty_cell.is_empty());
            // copy old tail into the empty cell
            if (seq_meta.tail >= 0) {
                kv_cell & orig_cell = cells[seq_meta.tail];
                empty_cell.pos = orig_cell.pos;
                empty_cell.src = orig_cell.src;
                orig_cell.seq_id.erase(seq_id);
                empty_cell.seq_id.insert(seq_id); // will be overwritten
            }
            seq_meta.tail = next_empty_cell;
            // find next empty cell
            if (s + 1 < n_seqs) {
                next_empty_cell += 1;
                for (uint32_t i = 0; i < size; ++i) {
                    if (next_empty_cell >= size) { next_empty_cell -= size; }
                    kv_cell & cell = cells[next_empty_cell];
                    if (cell.is_empty()) { break; }
                    next_empty_cell += 1;
                }
            }
        }
        if (min > seq_meta.tail) { min = seq_meta.tail; }
        if (max < seq_meta.tail) { max = seq_meta.tail; }
    }

    // gather and re-order
    for (uint32_t s = 0; s < n_seqs; ++s) {
        int32_t dst_id = s + min;
        int32_t src_id = cells[ubatch.seq_id[s][0]].tail;
        if (dst_id != src_id) {
            kv_cell & dst_cell = cells[dst_id];
            kv_cell & src_cell = cells[src_id];

            std::swap(dst_cell.pos, src_cell.pos);
            std::swap(dst_cell.src, src_cell.src);
            std::swap(dst_cell.seq_id, src_cell.seq_id);

            // swap tails (assuming they NEVER overlap)
            for (const llama_seq_id seq_id : src_cell.seq_id) {
                cells[seq_id].tail = src_id;
            }
            for (const llama_seq_id seq_id : dst_cell.seq_id) {
                cells[seq_id].tail = dst_id;
            }
        }
    }

    // update the pos of the used seqs
    for (uint32_t s = 0; s < n_seqs; ++s) {
        const llama_pos last_pos = ubatch.pos[n_seq_tokens * s + n_seq_tokens - 1];
        int32_t cell_id = s + min;
        kv_cell & cell = cells[cell_id];

        if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) {
            // What should happen when the pos backtracks or skips a value?
            // Clearing the state mid-batch would require special-casing which isn't done.
            LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n",
                __func__, last_pos, cell.pos, ubatch.seq_id[s][0], n_seq_tokens);
        }
        cell.pos = last_pos;
        cell.seq_id.clear();
        for (int32_t j = 0; j < ubatch.n_seq_id[s]; ++j) {
            const llama_seq_id seq_id = ubatch.seq_id[s][j];
            cell.seq_id.insert(seq_id);
            cells[seq_id].tail = cell_id;
        }
    }

    // allow getting the range of used cells, from head to head + n
    head = min;
    n    = max - min + 1;
    used = std::count_if(cells.begin(), cells.end(),
        [](const kv_cell & cell){ return !cell.is_empty(); });

    // sanity check
    return n >= n_seqs;
}

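// Worked example (editorial, following the gather step above; tails live in
// cells[seq_id].tail): with size = 4, cell 1 holding seq 0's state and cell 2
// holding seq 2's state, a ubatch visiting sequences {2, 0} in that order gives
// min = 1, max = 2, and the gather swaps cell contents so the states land in
// ubatch order, contiguous in [min, max]:
//
//     before:  cells = [ -, s0, s2, - ]
//     after:   cells = [ -, s2, s0, - ]   head = 1, n = 2
//
// The compute graph can then address the states as one contiguous block of
// n cells starting at head.
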
bool llama_kv_cache_recurrent::get_can_shift() const {
    return false;
}

int32_t llama_kv_cache_recurrent::s_copy(int i) const {
    const uint32_t cell_id = i + head;

    //////////////////////////////////////////////
    // TODO: this should not mutate the KV cache !
    kv_cell & cell = const_cast<kv_cell &>(cells[cell_id]);

    // prevent out-of-bound sources
    if (cell.src < 0 || (uint32_t) cell.src >= size) {
        cell.src = cell_id;
    }

    int32_t res = cell.src;

    // TODO: do not mutate the KV cache
    // ensure copy only happens once
    if (cell.src != (int32_t) cell_id) {
        cell.src = cell_id;
    }

    return res;
}

float llama_kv_cache_recurrent::s_mask(int i) const {
    const uint32_t cell_id = i + head;

    //////////////////////////////////////////////
    // TODO: this should not mutate the KV cache !
    kv_cell & cell = const_cast<kv_cell &>(cells[cell_id]);

    float res = (float) (cell.src >= 0);

    // only clear once
    if (cell.src < 0) {
        cell.src = cell_id;
    }

    return res;
}

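// Sketch (hypothetical graph-building code, not from this file): s_copy() and
// s_mask() are typically consumed once per visible cell when filling the input
// tensors that shuffle and clear recurrent states before the ubatch is
// computed. The buffer names below are illustrative assumptions.
//
//     for (uint32_t i = 0; i < n_kv; ++i) {
//         ((int32_t *) s_copy_data)[i] = mstate->s_copy(i);  // source cell per row
//         ((float   *) s_mask_data)[i] = mstate->s_mask(i);  // 0.0f clears a fresh state
//     }
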
size_t llama_kv_cache_recurrent::total_size() const {
    size_t size = 0;
    for (const auto & buf : bufs) {
        size += ggml_backend_buffer_get_size(buf.get());
    }

    return size;
}

size_t llama_kv_cache_recurrent::size_k_bytes() const {
    size_t size_k_bytes = 0;

    for (const auto & k : k_l) {
        size_k_bytes += ggml_nbytes(k);
    }

    return size_k_bytes;
}

size_t llama_kv_cache_recurrent::size_v_bytes() const {
    size_t size_v_bytes = 0;

    for (const auto & v : v_l) {
        size_v_bytes += ggml_nbytes(v);
    }

    return size_v_bytes;
}

void llama_kv_cache_recurrent::state_write(llama_io_write_i & io, llama_seq_id seq_id) const {
    std::vector<std::pair<uint32_t, uint32_t>> cell_ranges; // ranges, from inclusive, to exclusive
    uint32_t cell_count = 0;

    // Count the number of cells with the specified seq_id
    // Find all the ranges of cells with this seq id (or all, when -1)
    uint32_t cell_range_begin = size;
    for (uint32_t i = 0; i < size; ++i) {
        const auto & cell = cells[i];
        if ((seq_id == -1 && !cell.is_empty()) || cell.has_seq_id(seq_id)) {
            ++cell_count;
            if (cell_range_begin == size) {
                cell_range_begin = i;
            }
        } else {
            if (cell_range_begin != size) {
                cell_ranges.emplace_back(cell_range_begin, i);
                cell_range_begin = size;
            }
        }
    }
    if (cell_range_begin != size) {
        cell_ranges.emplace_back(cell_range_begin, size);
    }

    // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
    uint32_t cell_count_check = 0;
    for (const auto & range : cell_ranges) {
        cell_count_check += range.second - range.first;
    }
    GGML_ASSERT(cell_count == cell_count_check);

    io.write(&cell_count, sizeof(cell_count));

    state_write_meta(io, cell_ranges, seq_id);
    state_write_data(io, cell_ranges);
}

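// Serialized layout (editorial summary of state_write_meta/state_write_data
// below, assuming the non-transposed V path used by this cache):
//
//     uint32_t cell_count
//     per cell:  llama_pos pos, uint32_t n_seq_id, [llama_seq_id ...]
//     uint32_t v_trans (0), uint32_t n_layer
//     per layer: int32_t k_type, uint64_t k_row_size, K rows for each cell range
//     per layer: int32_t v_type, uint64_t v_row_size, V rows for each cell range
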
void llama_kv_cache_recurrent::state_read(llama_io_read_i & io, llama_seq_id seq_id) {
    uint32_t cell_count;
    io.read_to(&cell_count, sizeof(cell_count));

    bool res = true;

    res = res && state_read_meta(io, cell_count, seq_id);
    res = res && state_read_data(io, cell_count);

    if (!res) {
        if (seq_id == -1) {
            clear();
        } else {
            seq_rm(seq_id, -1, -1);
        }
        throw std::runtime_error("failed to restore kv cache");
    }
}

void llama_kv_cache_recurrent::state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id) const {
    for (const auto & range : cell_ranges) {
        for (uint32_t i = range.first; i < range.second; ++i) {
            const auto & cell = cells[i];
            const llama_pos pos      = cell.pos;
            const uint32_t  n_seq_id = seq_id == -1 ? cell.seq_id.size() : 0;

            io.write(&pos,      sizeof(pos));
            io.write(&n_seq_id, sizeof(n_seq_id));

            if (n_seq_id) {
                for (auto seq_id : cell.seq_id) {
                    io.write(&seq_id, sizeof(seq_id));
                }
            }
        }
    }
}

void llama_kv_cache_recurrent::state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const {
    const uint32_t v_trans = 0;
    const uint32_t n_layer = hparams.n_layer;

    io.write(&v_trans, sizeof(v_trans));
    io.write(&n_layer, sizeof(n_layer));

    // Iterate and write all the keys first, each row is a cell
    // Get whole range at a time
    for (uint32_t il = 0; il < n_layer; ++il) {
        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();

        // Write key type
        const int32_t k_type_i = (int32_t)k_l[il]->type;
        io.write(&k_type_i, sizeof(k_type_i));

        // Write row size of key
        const uint64_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa);
        io.write(&k_size_row, sizeof(k_size_row));

        // Write each range of cells, one row of k_size_row bytes per cell
        for (const auto & range : cell_ranges) {
            const size_t range_size = range.second - range.first;
            const size_t buf_size = range_size * k_size_row;
            io.write_tensor(k_l[il], range.first * k_size_row, buf_size);
        }
    }

    if (!v_trans) {
        for (uint32_t il = 0; il < n_layer; ++il) {
            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();

            // Write value type
            const int32_t v_type_i = (int32_t)v_l[il]->type;
            io.write(&v_type_i, sizeof(v_type_i));

            // Write row size of value
            const uint64_t v_size_row = ggml_row_size(v_l[il]->type, n_embd_v_gqa);
            io.write(&v_size_row, sizeof(v_size_row));

            // Write each range of cells, one row of v_size_row bytes per cell
            for (const auto & range : cell_ranges) {
                const size_t range_size = range.second - range.first;
                const size_t buf_size = range_size * v_size_row;
                io.write_tensor(v_l[il], range.first * v_size_row, buf_size);
            }
        }
    } else {
        // When v is transposed, we also need the element size and get the element ranges from each row
        const uint32_t kv_size = size;
        for (uint32_t il = 0; il < n_layer; ++il) {
            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();

            // Write value type
            const int32_t v_type_i = (int32_t)v_l[il]->type;
            io.write(&v_type_i, sizeof(v_type_i));

            // Write element size
            const uint32_t v_size_el = ggml_type_size(v_l[il]->type);
            io.write(&v_size_el, sizeof(v_size_el));

            // Write GQA embedding size
            io.write(&n_embd_v_gqa, sizeof(n_embd_v_gqa));

            // For each row, we get the element values of each cell
            for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
                // Write each range of cells, v_size_el bytes per cell
                for (const auto & range : cell_ranges) {
                    const size_t range_size = range.second - range.first;
                    const size_t src_offset = (range.first + j * kv_size) * v_size_el;
                    const size_t buf_size = range_size * v_size_el;
                    io.write_tensor(v_l[il], src_offset, buf_size);
                }
            }
        }
    }
}

bool llama_kv_cache_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id) {
    if (dest_seq_id != -1) {
        // single sequence

        seq_rm(dest_seq_id, -1, -1);

        llama_sbatch sbatch;
        llama_ubatch batch = sbatch.reserve_ubatch(cell_count, /* has_embd */ false);

        batch.n_tokens = cell_count;
        batch.n_seq_tokens = cell_count;
        batch.n_seqs = 1;

        for (uint32_t i = 0; i < cell_count; ++i) {
            llama_pos pos;
            uint32_t n_seq_id;

            io.read_to(&pos,      sizeof(pos));
            io.read_to(&n_seq_id, sizeof(n_seq_id));

            if (n_seq_id != 0) {
                LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__);
                return false;
            }

            batch.pos[i] = pos;
        }
        batch.n_seq_id[0] = 1;
        batch.seq_id[0] = &dest_seq_id;

        if (!find_slot(batch)) {
            LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
            return false;
        }

        // DEBUG CHECK: kv.head should be our first cell, kv.head + cell_count - 1 should be our last cell (verify seq_id and pos values)
        // Assume that this is one contiguous block of cells
        GGML_ASSERT(head + cell_count <= size);
        GGML_ASSERT(cells[head].pos == batch.pos[0]);
        GGML_ASSERT(cells[head + cell_count - 1].pos == batch.pos[cell_count - 1]);
        GGML_ASSERT(cells[head].has_seq_id(dest_seq_id));
        GGML_ASSERT(cells[head + cell_count - 1].has_seq_id(dest_seq_id));
    } else {
        // whole KV cache restore

        if (cell_count > size) {
            LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__);
            return false;
        }

        clear();

        for (uint32_t i = 0; i < cell_count; ++i) {
            kv_cell & cell = cells[i];

            llama_pos pos;
            uint32_t  n_seq_id;

            io.read_to(&pos,      sizeof(pos));
            io.read_to(&n_seq_id, sizeof(n_seq_id));

            cell.pos = pos;

            for (uint32_t j = 0; j < n_seq_id; ++j) {
                llama_seq_id seq_id;
                io.read_to(&seq_id, sizeof(seq_id));

                // TODO: llama_kv_cache_recurrent should have a notion of max sequences
                //if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) {
                if (seq_id < 0) {
                    //LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx));
                    LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, inf)\n", __func__, seq_id);
                    return false;
                }

                cell.seq_id.insert(seq_id);

                int32_t & tail = cells[seq_id].tail;
                if (tail != -1) {
                    LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tail);
                    return false;
                }
                tail = i;
            }
        }

        head = 0;
        used = cell_count;
    }

    for (uint32_t i = 0; i < cell_count; ++i) {
        uint32_t cell_id = head + i;
        // make sure the recurrent states will keep their restored state
        cells[cell_id].src = cell_id;
    }

    return true;
}

bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t cell_count) {
    uint32_t v_trans;
    uint32_t n_layer;
    io.read_to(&v_trans, sizeof(v_trans));
    io.read_to(&n_layer, sizeof(n_layer));

    if (n_layer != hparams.n_layer) {
        LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, hparams.n_layer);
        return false;
    }
    if (cell_count > size) {
        LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, size);
        return false;
    }
    if (false != (bool) v_trans) {
        LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__);
        return false;
    }

    // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block
    for (uint32_t il = 0; il < n_layer; ++il) {
        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();

        // Read type of key
        int32_t k_type_i_ref;
        io.read_to(&k_type_i_ref, sizeof(k_type_i_ref));
        const int32_t k_type_i = (int32_t) k_l[il]->type;
        if (k_type_i != k_type_i_ref) {
            LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il);
            return false;
        }

        // Read row size of key
        uint64_t k_size_row_ref;
        io.read_to(&k_size_row_ref, sizeof(k_size_row_ref));
        const size_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa);
        if (k_size_row != k_size_row_ref) {
            LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il);
            return false;
        }

        if (cell_count) {
            // Read and set the keys for the whole cell range
            ggml_backend_tensor_set(k_l[il], io.read(cell_count * k_size_row), head * k_size_row, cell_count * k_size_row);
        }
    }

    if (!v_trans) {
        for (uint32_t il = 0; il < n_layer; ++il) {
            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();

            // Read type of value
            int32_t v_type_i_ref;
            io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
            const int32_t v_type_i = (int32_t)v_l[il]->type;
            if (v_type_i != v_type_i_ref) {
                LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
                return false;
            }

            // Read row size of value
            uint64_t v_size_row_ref;
            io.read_to(&v_size_row_ref, sizeof(v_size_row_ref));
            const size_t v_size_row = ggml_row_size(v_l[il]->type, n_embd_v_gqa);
            if (v_size_row != v_size_row_ref) {
                LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il);
                return false;
            }

            if (cell_count) {
                // Read and set the values for the whole cell range
                ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_row), head * v_size_row, cell_count * v_size_row);
            }
        }
    } else {
        // For each layer, read the values for each cell (transposed)
        for (uint32_t il = 0; il < n_layer; ++il) {
            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();

            // Read type of value
            int32_t v_type_i_ref;
            io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
            const int32_t v_type_i = (int32_t)v_l[il]->type;
            if (v_type_i != v_type_i_ref) {
                LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
                return false;
            }

            // Read element size of value
            uint32_t v_size_el_ref;
            io.read_to(&v_size_el_ref, sizeof(v_size_el_ref));
            const size_t v_size_el = ggml_type_size(v_l[il]->type);
            if (v_size_el != v_size_el_ref) {
                LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il);
                return false;
            }

            // Read GQA embedding size
            uint32_t n_embd_v_gqa_ref;
            io.read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref));
            if (n_embd_v_gqa != n_embd_v_gqa_ref) {
                LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il);
                return false;
            }

            if (cell_count) {
                // For each row in the transposed matrix, read the values for the whole cell range
                for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
                    const size_t dst_offset = (head + j * size) * v_size_el;
                    ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_el), dst_offset, cell_count * v_size_el);
                }
            }
        }
    }

    return true;
}

//
// llama_kv_cache_recurrent_state
//

llama_kv_cache_recurrent_state::llama_kv_cache_recurrent_state(llama_memory_status status) : status(status) {}

llama_kv_cache_recurrent_state::llama_kv_cache_recurrent_state(
        llama_memory_status status,
        llama_kv_cache_recurrent * kv) : status(status), kv(kv), is_full(true) {
}

llama_kv_cache_recurrent_state::llama_kv_cache_recurrent_state(
        llama_memory_status status,
        llama_kv_cache_recurrent * kv,
        llama_sbatch sbatch,
        std::vector<llama_ubatch> ubatches) : status(status), kv(kv), sbatch(std::move(sbatch)), ubatches(std::move(ubatches)) {}

llama_kv_cache_recurrent_state::~llama_kv_cache_recurrent_state() = default;

bool llama_kv_cache_recurrent_state::next() {
    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);

    if (++i_next >= ubatches.size()) {
        return false;
    }

    return true;
}

bool llama_kv_cache_recurrent_state::apply() {
    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);

    kv->find_slot(ubatches[i_next]);

    return true;
}

std::vector<int64_t> & llama_kv_cache_recurrent_state::out_ids() {
    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);

    return sbatch.out_ids;
}

llama_memory_status llama_kv_cache_recurrent_state::get_status() const {
    return status;
}

const llama_ubatch & llama_kv_cache_recurrent_state::get_ubatch() const {
    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);

    return ubatches[i_next];
}

uint32_t llama_kv_cache_recurrent_state::get_n_kv() const {
    return is_full ? kv->size : kv->n;
}

uint32_t llama_kv_cache_recurrent_state::get_head() const {
    return is_full ? 0 : kv->head;
}

uint32_t llama_kv_cache_recurrent_state::get_size() const {
    return kv->size;
}

ggml_tensor * llama_kv_cache_recurrent_state::get_k_l(int32_t il) const {
    return kv->k_l[il];
}

ggml_tensor * llama_kv_cache_recurrent_state::get_v_l(int32_t il) const {
    return kv->v_l[il];
}

int32_t llama_kv_cache_recurrent_state::s_copy(int i) const {
    return kv->s_copy(i);
}

float llama_kv_cache_recurrent_state::s_mask(int i) const {
    return kv->s_mask(i);
}