Skip to content

Commit c80a775

Browse files
authored
vocab : add special infill tokens for CodeLlama (ggml-org#11850)
* vocab : add special infill tokens for CodeLlama The commit adds the following special tokens for CodeLlama infill: - `▁<PRE>` - `▁<SUF>` - `▁<MID>` The motivation for this is that currently the infill example uses CodeLlama as a suggested model. But when using this model the following error is generated: ```console /llama.cpp-debug/examples/infill/infill.cpp:165: GGML_ASSERT(llama_vocab_fim_pre(vocab) >= 0) failed Could not attach to process. If your uid matches the uid of the target process, check the setting of /proc/sys/kernel/yama/ptrace_scope, or try again as the root user. For more details, see /etc/sysctl.d/10-ptrace.conf ptrace: Operation not permitted. No stack. The program is not being run. 305251 Aborted (core dumped) ./build/bin/llama-infill -t 10 -ngl 0 -m models/codellama-13b.Q5_K_S.gguf \ -c 4096 --temp 0.7 --repeat_penalty 1.1 -n 20 \ --in-prefix "def helloworld():\n print(\"hell" \ --in-suffix "\n print(\"goodbye world\")\n " ``` * squash! vocab : add special infill tokens for CodeLlama Add `▁<EOT>` as well.
1 parent 250d795 commit c80a775

File tree

1 file changed

+5
-0
lines changed

1 file changed

+5
-0
lines changed

src/llama-vocab.cpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1807,6 +1807,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
18071807
|| t.first == "<end_of_turn>"
18081808
|| t.first == "<|endoftext|>"
18091809
|| t.first == "<EOT>"
1810+
|| t.first == "▁<EOT>" // CodeLlama
18101811
|| t.first == "<|end▁of▁sentence|>" // DeepSeek
18111812
) {
18121813
special_eot_id = t.second;
@@ -1839,6 +1840,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
18391840
|| t.first == "<fim-prefix>"
18401841
|| t.first == "<|fim▁begin|>" // DeepSeek
18411842
|| t.first == "<PRE>"
1843+
|| t.first == "▁<PRE>" // CodeLlama
18421844
) {
18431845
special_fim_pre_id = t.second;
18441846
if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
@@ -1856,6 +1858,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
18561858
|| t.first == "<fim-suffix>"
18571859
|| t.first == "<|fim▁hole|>" // DeepSeek
18581860
|| t.first == "<SUF>"
1861+
|| t.first == "▁<SUF>" // CodeLlama
18591862
) {
18601863
special_fim_suf_id = t.second;
18611864
if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
@@ -1873,6 +1876,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
18731876
|| t.first == "<fim-middle>"
18741877
|| t.first == "<|fim▁end|>" // DeepSeek
18751878
|| t.first == "<MID>"
1879+
|| t.first == "▁<MID>" // CodeLlama
18761880
) {
18771881
special_fim_mid_id = t.second;
18781882
if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
@@ -1957,6 +1961,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
19571961
|| t.first == "<|endoftext|>"
19581962
|| t.first == "<|eom_id|>"
19591963
|| t.first == "<EOT>"
1964+
|| t.first == "▁<EOT>" // CodeLlama
19601965
) {
19611966
special_eog_ids.insert(t.second);
19621967
if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {

0 commit comments

Comments
 (0)