Mirror of https://github.com/ggerganov/whisper.cpp.git (synced 2025-08-09 15:16:08 +02:00).
Commit: "talk-llama : sync llama.cpp (#2709)".
This commit adds one new file, examples/talk-llama/llama-chat.h (51 lines added; hunk @@ -0,0 +1,51 @@).
#pragma once

#include <string>
#include <vector>
#include <cstdint>

// Identifiers for the chat-prompt template formats recognized by the
// chat-formatting code (see llm_chat_detect_template /
// llm_chat_apply_template declared below).
enum llm_chat_template {
    LLM_CHAT_TEMPLATE_CHATML,
    LLM_CHAT_TEMPLATE_LLAMA_2,
    LLM_CHAT_TEMPLATE_LLAMA_2_SYS,
    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS,
    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP,
    LLM_CHAT_TEMPLATE_MISTRAL_V1,
    LLM_CHAT_TEMPLATE_MISTRAL_V3,
    LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN,
    LLM_CHAT_TEMPLATE_MISTRAL_V7,
    LLM_CHAT_TEMPLATE_PHI_3,
    LLM_CHAT_TEMPLATE_FALCON_3,
    LLM_CHAT_TEMPLATE_ZEPHYR,
    LLM_CHAT_TEMPLATE_MONARCH,
    LLM_CHAT_TEMPLATE_GEMMA,
    LLM_CHAT_TEMPLATE_ORION,
    LLM_CHAT_TEMPLATE_OPENCHAT,
    LLM_CHAT_TEMPLATE_VICUNA,
    LLM_CHAT_TEMPLATE_VICUNA_ORCA,
    LLM_CHAT_TEMPLATE_DEEPSEEK,
    LLM_CHAT_TEMPLATE_DEEPSEEK_2,
    LLM_CHAT_TEMPLATE_DEEPSEEK_3,
    LLM_CHAT_TEMPLATE_COMMAND_R,
    LLM_CHAT_TEMPLATE_LLAMA_3,
    // NOTE(review): "CHATGML" looks like a typo of "CHATGLM", but the
    // identifiers are kept as-is — callers elsewhere reference these names.
    LLM_CHAT_TEMPLATE_CHATGML_3,
    LLM_CHAT_TEMPLATE_CHATGML_4,
    LLM_CHAT_TEMPLATE_MINICPM,
    LLM_CHAT_TEMPLATE_EXAONE_3,
    LLM_CHAT_TEMPLATE_RWKV_WORLD,
    LLM_CHAT_TEMPLATE_GRANITE,
    LLM_CHAT_TEMPLATE_GIGACHAT,
    LLM_CHAT_TEMPLATE_MEGREZ,
    // Sentinel for templates that could not be identified.
    LLM_CHAT_TEMPLATE_UNKNOWN,
};
|
||||
|
||||
// Forward declaration; the full definition ({ role, content } message)
// lives elsewhere in the project — only pointers to it are used here.
struct llama_chat_message;

// Map a template name to its enum identifier.
// Presumably returns LLM_CHAT_TEMPLATE_UNKNOWN for unrecognized names —
// confirm against the definition in llama-chat.cpp.
llm_chat_template llm_chat_template_from_str(const std::string & name);

// Identify which template format `tmpl` (a template string or name) uses.
// NOTE(review): looks like a heuristic match that falls back to
// LLM_CHAT_TEMPLATE_UNKNOWN — verify in the implementation.
llm_chat_template llm_chat_detect_template(const std::string & tmpl);

// Render the conversation `chat` into `dest` using template `tmpl`.
// When `add_ass` is true, presumably also appends the prefix that cues the
// assistant's next reply — confirm in the implementation.
// Returns int32_t status/length; exact semantics (e.g. negative on error)
// are defined in llama-chat.cpp — TODO confirm.
int32_t llm_chat_apply_template(
    llm_chat_template tmpl,
    const std::vector<const llama_chat_message *> & chat,
    std::string & dest, bool add_ass);
|
Reference in New Issue
Block a user