
Add shared.is_chat() function

oobabooga, 2 years ago
parent
commit b0890a7925
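
This commit replaces the repeated two-flag test `shared.args.chat or shared.args.cai_chat` with a single `shared.is_chat()` helper defined in modules/shared.py, so the definition of "chat mode" lives in one place. The equivalence at each call site, using only names visible in this diff:

    # Before: every call site repeated the two-flag test
    if shared.args.chat or shared.args.cai_chat:
        ...

    # After: the test is centralized in modules/shared.py
    if shared.is_chat():
        ...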

+ 1 - 1
extensions/llama_prompts/script.py

@@ -11,7 +11,7 @@ def get_prompt_by_name(name):
         return df[df['Prompt name'] == name].iloc[0]['Prompt'].replace('\\n', '\n')
 
 def ui():
-    if not shared.args.chat or shared.args.cai_chat:
+    if not shared.is_chat():
         choices = ['None'] + list(df['Prompt name'])
 
         prompts_menu = gr.Dropdown(value=choices[0], choices=choices, label='Prompt')

+ 1 - 1
extensions/silero_tts/script.py

@@ -74,7 +74,7 @@ def input_modifier(string):
     """
 
     # Remove autoplay from the last reply
-    if (shared.args.chat or shared.args.cai_chat) and len(shared.history['internal']) > 0:
+    if shared.is_chat() and len(shared.history['internal']) > 0:
         shared.history['visible'][-1] = [shared.history['visible'][-1][0], shared.history['visible'][-1][1].replace('controls autoplay>','controls>')]
 
     shared.processing_message = "*Is recording a voice message...*"

+ 3 - 1
modules/shared.py

@@ -129,10 +129,12 @@ parser.add_argument("--gradio-auth-path", type=str, help='Set the gradio authent
 
 args = parser.parse_args()
 
-
 # Provisional, this will be deleted later
 deprecated_dict = {'gptq_bits': ['wbits', 0], 'gptq_model_type': ['model_type', None], 'gptq_pre_layer': ['prelayer', 0]}
 for k in deprecated_dict:
     if eval(f"args.{k}") != deprecated_dict[k][1]:
         print(f"Warning: --{k} is deprecated and will be removed. Use --{deprecated_dict[k][0]} instead.")
         exec(f"args.{deprecated_dict[k][0]} = args.{k}")
+
+def is_chat():
+    return any((args.chat, args.cai_chat))
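
A minimal, self-contained sketch of the new helper's behavior. The two argparse flags are reproduced here under the assumption, implied by the old `or` test, that both are store_true booleans:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--chat', action='store_true')      # assumed store_true boolean
    parser.add_argument('--cai-chat', action='store_true')  # assumed store_true boolean
    args = parser.parse_args(['--cai-chat'])

    def is_chat():
        # True when either flag was passed, matching the helper added above
        return any((args.chat, args.cai_chat))

    print(is_chat())  # True: --cai-chat alone enables chat mode

For boolean flags, `any((args.chat, args.cai_chat))` is equivalent to `args.chat or args.cai_chat`; centralizing the test also means any future chat-mode flag only needs to be handled in this one function rather than at every call site below.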

+ 9 - 9
modules/text_generation.py

@@ -76,7 +76,7 @@ def fix_galactica(s):
     return s
 
 def formatted_outputs(reply, model_name):
-    if not (shared.args.chat or shared.args.cai_chat):
+    if not shared.is_chat():
         if 'galactica' in model_name.lower():
             reply = fix_galactica(reply)
             return reply, reply, generate_basic_html(reply)
@@ -109,7 +109,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
     t0 = time.time()
 
     original_question = question
-    if not (shared.args.chat or shared.args.cai_chat):
+    if not shared.is_chat():
         question = apply_extensions(question, "input")
     if shared.args.verbose:
         print(f"\n\n{question}\n--------------------\n")
@@ -121,18 +121,18 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
             if shared.args.no_stream:
                 reply = shared.model.generate(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty)
                 output = original_question+reply
-                if not (shared.args.chat or shared.args.cai_chat):
+                if not shared.is_chat():
                     reply = original_question + apply_extensions(reply, "output")
                 yield formatted_outputs(reply, shared.model_name)
             else:
-                if not (shared.args.chat or shared.args.cai_chat):
+                if not shared.is_chat():
                     yield formatted_outputs(question, shared.model_name)
 
                 # RWKV has proper streaming, which is very nice.
                 # No need to generate 8 tokens at a time.
                 for reply in shared.model.generate_with_streaming(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty):
                     output = original_question+reply
-                    if not (shared.args.chat or shared.args.cai_chat):
+                    if not shared.is_chat():
                         reply = original_question + apply_extensions(reply, "output")
                     yield formatted_outputs(reply, shared.model_name)
 
@@ -208,7 +208,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
 
             new_tokens = len(output) - len(input_ids[0])
             reply = decode(output[-new_tokens:])
-            if not (shared.args.chat or shared.args.cai_chat):
+            if not shared.is_chat():
                 reply = original_question + apply_extensions(reply, "output")
 
             yield formatted_outputs(reply, shared.model_name)
@@ -226,7 +226,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
             def generate_with_streaming(**kwargs):
                 return Iteratorize(generate_with_callback, kwargs, callback=None)
 
-            if not (shared.args.chat or shared.args.cai_chat):
+            if not shared.is_chat():
                 yield formatted_outputs(original_question, shared.model_name)
             with generate_with_streaming(**generate_params) as generator:
                 for output in generator:
@@ -235,7 +235,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
 
                     new_tokens = len(output) - len(input_ids[0])
                     reply = decode(output[-new_tokens:])
-                    if not (shared.args.chat or shared.args.cai_chat):
+                    if not shared.is_chat():
                         reply = original_question + apply_extensions(reply, "output")
 
                     if output[-1] in eos_token_ids:
@@ -253,7 +253,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
 
                 new_tokens = len(output) - len(original_input_ids[0])
                 reply = decode(output[-new_tokens:])
-                if not (shared.args.chat or shared.args.cai_chat):
+                if not shared.is_chat():
                     reply = original_question + apply_extensions(reply, "output")
 
                 if np.count_nonzero(np.isin(input_ids[0], eos_token_ids)) < np.count_nonzero(np.isin(output, eos_token_ids)):

+ 3 - 3
server.py

@@ -244,7 +244,7 @@ available_loras = get_available_loras()
 
 # Default extensions
 extensions_module.available_extensions = get_available_extensions()
-if shared.args.chat or shared.args.cai_chat:
+if shared.is_chat():
     for extension in shared.settings['chat_default_extensions']:
         shared.args.extensions = shared.args.extensions or []
         if extension not in shared.args.extensions:
@@ -290,8 +290,8 @@ def create_interface():
     if shared.args.extensions is not None and len(shared.args.extensions) > 0:
         extensions_module.load_extensions()
 
-    with gr.Blocks(css=ui.css if not any((shared.args.chat, shared.args.cai_chat)) else ui.css+ui.chat_css, analytics_enabled=False, title=title) as shared.gradio['interface']:
-        if shared.args.chat or shared.args.cai_chat:
+    with gr.Blocks(css=ui.css if not shared.is_chat() else ui.css+ui.chat_css, analytics_enabled=False, title=title) as shared.gradio['interface']:
+        if shared.is_chat():
             with gr.Tab("Text generation", elem_id="main"):
                 if shared.args.cai_chat:
                     shared.gradio['display'] = gr.HTML(value=generate_chat_html(shared.history['visible'], shared.settings['name1'], shared.settings['name2'], shared.character))