Browse Source

Use a list of eos_tokens instead of just a number

This might be the cause of the LLaMA ramblings that some people have experienced.
oobabooga 2 years ago
parent
commit
c7aa51faa6
1 changed file with 7 additions and 5 deletions
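
For context: recent versions of Hugging Face transformers accept eos_token_id either as a single int or as a list of ints, which is what this change relies on. A minimal standalone sketch of the idea (assumed setup, not this repository's code; the model name and variable names are placeholders):

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

input_ids = tokenizer("Hello", return_tensors="pt").input_ids

# Before this commit: generation stopped on one token id only.
# After: it stops on the model's own EOS *or* a custom token such as "\n".
eos_token_ids = [tokenizer.eos_token_id]
custom_eos = tokenizer("\n").input_ids[-1]  # last-token pick, like encode(eos_token)[0][-1] below
eos_token_ids.append(custom_eos)

output = model.generate(input_ids, max_new_tokens=20, eos_token_id=eos_token_ids)
print(tokenizer.decode(output[0]))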

modules/text_generation.py (+7, -5)

@@ -119,7 +119,9 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
     original_input_ids = input_ids
     output = input_ids[0]
     cuda = "" if (shared.args.cpu or shared.args.deepspeed or shared.args.flexgen) else ".cuda()"
-    n = shared.tokenizer.eos_token_id if eos_token is None else int(encode(eos_token)[0][-1])
+    eos_token_ids = [shared.tokenizer.eos_token_id]
+    if eos_token is not None:
+        eos_token_ids.append(int(encode(eos_token)[0][-1]))
     stopping_criteria_list = transformers.StoppingCriteriaList()
     if stopping_string is not None:
         # Copied from https://github.com/PygmalionAI/gradio-ui/blob/master/src/model.py
@@ -129,7 +131,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
     if not shared.args.flexgen:
         generate_params = [
             f"max_new_tokens=max_new_tokens",
-            f"eos_token_id={n}",
+            f"eos_token_id={eos_token_ids}",
             f"stopping_criteria=stopping_criteria_list",
             f"do_sample={do_sample}",
             f"temperature={temperature}",
@@ -149,7 +151,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
             f"max_new_tokens={max_new_tokens if shared.args.no_stream else 8}",
             f"do_sample={do_sample}",
             f"temperature={temperature}",
-            f"stop={n}",
+            f"stop={eos_token_ids[-1]}",
         ]
     if shared.args.deepspeed:
         generate_params.append("synced_gpus=True")
@@ -198,7 +200,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
                     if not (shared.args.chat or shared.args.cai_chat):
                         reply = original_question + apply_extensions(reply[len(question):], "output")
 
-                    if output[-1] == n:
+                    if output[-1] in eos_token_ids:
                         break
                     yield formatted_outputs(reply, shared.model_name)
 
@@ -219,7 +221,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
                 if not (shared.args.chat or shared.args.cai_chat):
                     reply = original_question + apply_extensions(reply[len(question):], "output")
 
-                if np.count_nonzero(input_ids[0] == n) < np.count_nonzero(output == n):
+                if np.count_nonzero(np.isin(input_ids[0], eos_token_ids)) < np.count_nonzero(np.isin(output, eos_token_ids)):
                     break
                 yield formatted_outputs(reply, shared.model_name)
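
Two details worth noting. The FlexGen branch passes only eos_token_ids[-1], which suggests its stop parameter still takes a single token id, so a custom eos_token takes precedence over the model default there. And the final hunk detects a newly generated EOS by comparing occurrence counts with np.isin; a minimal sketch of that check, with made-up token ids for illustration only:

import numpy as np

eos_token_ids = [2, 13]               # hypothetical: model EOS plus a custom token
input_ids = np.array([5, 9, 2])       # the prompt already contains one EOS
output = np.array([5, 9, 2, 7, 13])   # generation appended a custom EOS token

# A new EOS was generated iff the match count grew during generation.
before = np.count_nonzero(np.isin(input_ids, eos_token_ids))
after = np.count_nonzero(np.isin(output, eos_token_ids))
if after > before:
    print("stop")  # i.e. break out of the generation loop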