Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Appearance settings

Commit 0974ad7

Browse files
authored
llama : fix llama_model_chat_template with template name (LLM_KV with suffix) (#14050)
1 parent 745aa53 commit 0974ad7
Copy full SHA for 0974ad7

File tree

Expand file tree / Collapse file tree

3 files changed

+9
-5
lines changed
Filter options
Expand file tree / Collapse file tree

3 files changed

+9
-5
lines changed

‎src/llama-arch.cpp

Copy file name to clipboard · Expand all lines: src/llama-arch.cpp
+8 −3 · Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -200,7 +200,6 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
200200
{ LLM_KV_TOKENIZER_HF_JSON, "tokenizer.huggingface.json" },
201201
{ LLM_KV_TOKENIZER_RWKV, "tokenizer.rwkv.world" },
202202
{ LLM_KV_TOKENIZER_CHAT_TEMPLATE, "tokenizer.chat_template" },
203-
{ LLM_KV_TOKENIZER_CHAT_TEMPLATE_N, "tokenizer.chat_template.%s" },
204203
{ LLM_KV_TOKENIZER_FIM_PRE_ID, "tokenizer.ggml.fim_pre_token_id" },
205204
{ LLM_KV_TOKENIZER_FIM_SUF_ID, "tokenizer.ggml.fim_suf_token_id" },
206205
{ LLM_KV_TOKENIZER_FIM_MID_ID, "tokenizer.ggml.fim_mid_token_id" },
@@ -1707,8 +1706,14 @@ static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
17071706
LLM_KV::LLM_KV(llm_arch arch, const char * suffix) : arch(arch), suffix(suffix) {}
17081707

17091708
std::string LLM_KV::operator()(llm_kv kv) const {
1710-
return suffix ? ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch), suffix)
1711-
: ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch));
1709+
std::string name = ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch));
1710+
1711+
if (suffix != nullptr) {
1712+
name += ".";
1713+
name += suffix;
1714+
}
1715+
1716+
return name;
17121717
}
17131718

17141719
std::string LLM_TN_IMPL::str() const {

‎src/llama-arch.h

Copy file name to clipboard · Expand all lines: src/llama-arch.h
−1 · Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -196,7 +196,6 @@ enum llm_kv {
196196
LLM_KV_TOKENIZER_HF_JSON,
197197
LLM_KV_TOKENIZER_RWKV,
198198
LLM_KV_TOKENIZER_CHAT_TEMPLATE,
199-
LLM_KV_TOKENIZER_CHAT_TEMPLATE_N,
200199
LLM_KV_TOKENIZER_FIM_PRE_ID,
201200
LLM_KV_TOKENIZER_FIM_SUF_ID,
202201
LLM_KV_TOKENIZER_FIM_MID_ID,

‎src/llama-model.cpp

Copy file name to clipboard · Expand all lines: src/llama-model.cpp
+1 −1 · Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13788,7 +13788,7 @@ uint64_t llama_model_size(const llama_model * model) {
1378813788
}
1378913789

1379013790
const char * llama_model_chat_template(const llama_model * model, const char * name) {
13791-
const auto key = name ? LLM_KV(model->arch, name)(LLM_KV_TOKENIZER_CHAT_TEMPLATE_N)
13791+
const auto key = name ? LLM_KV(model->arch, name)(LLM_KV_TOKENIZER_CHAT_TEMPLATE)
1379213792
: LLM_KV(model->arch)(LLM_KV_TOKENIZER_CHAT_TEMPLATE);
1379313793
const auto & it = model->gguf_kv.find(key);
1379413794
if (it == model->gguf_kv.end()) {

0 commit comments

Comments: 0
Morty Proxy This is a proxified and sanitized view of the page, visit original site.