
Count the tokens more conservatively

oobabooga 2 years ago
Parent
Commit
c93f1fa99b
1 changed file with 2 additions and 2 deletions

+ 2 - 2
modules/text_generation.py

@@ -23,9 +23,9 @@ def get_max_prompt_length(tokens):
 def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
 
     # These models do not have explicit tokenizers for now, so
-    # we return an estimate on the number of tokens
+    # we return an estimate for the number of tokens
     if shared.is_RWKV or shared.is_LLaMA:
-        return np.zeros((1, len(prompt)//5))
+        return np.zeros((1, len(prompt)//4))
 
     input_ids = shared.tokenizer.encode(str(prompt), return_tensors='pt', truncation=True, max_length=get_max_prompt_length(tokens_to_generate), add_special_tokens=add_special_tokens)
     if shared.args.cpu:
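
For context, the heuristic behind the change: English text averages roughly 4 characters per token, so dividing the prompt length by 4 instead of 5 yields a larger token estimate, which is the safer direction when the count is used to budget space in the context window. Below is a minimal sketch of the adjusted branch, extracted as a standalone function for illustration; in the repository the logic lives inside encode() in modules/text_generation.py, and the function name here is hypothetical.

import numpy as np

def estimate_tokens(prompt):
    # Character-based fallback for models with no explicit tokenizer
    # (RWKV and LLaMA at the time of this commit). The caller only
    # needs the shape of the result, so a zero array of the estimated
    # length is returned rather than real token ids.
    return np.zeros((1, len(prompt) // 4))

# A 100-character prompt is now estimated at 25 tokens instead of the
# previous 100 // 5 == 20, leaving more headroom during truncation.
print(estimate_tokens("a" * 100).shape)  # (1, 25)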