
Move more widgets into generation_parameters

oobabooga 2 years ago
parent
commit
cfdbc8bd23
3 changed files with 48 additions and 30 deletions
  1. modules/chat.py (+18, -8)
  2. modules/text_generation.py (+3, -1)
  3. server.py (+27, -21)

+ 18 - 8
modules/chat.py

@@ -96,7 +96,12 @@ def extract_message_from_reply(reply, name1, name2, stop_at_newline):
     reply = fix_newlines(reply)
     return reply, next_character_found
 
-def chatbot_wrapper(text, max_new_tokens, generation_params, seed, name1, name2, context, stop_at_newline, chat_prompt_size, chat_generation_attempts=1, regenerate=False, mode="cai-chat", end_of_turn=""):
+def chatbot_wrapper(text, generation_params, name1, name2, context, regenerate=False, mode="cai-chat", end_of_turn=""):
+    stop_at_newline = generation_params['stop_at_newline']
+    max_new_tokens = generation_params['max_new_tokens']
+    chat_prompt_size = generation_params['chat_prompt_size']
+    chat_generation_attempts = generation_params['chat_generation_attempts']
+
     just_started = True
     eos_token = '\n' if stop_at_newline else None
     name1_original = name1
@@ -131,7 +136,7 @@ def chatbot_wrapper(text, max_new_tokens, generation_params, seed, name1, name2,
     cumulative_reply = ''
     for i in range(chat_generation_attempts):
         reply = None
-        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", max_new_tokens, generation_params, seed, eos_token=eos_token, stopping_strings=[f"\n{name1}:", f"\n{name2}:"]):
+        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", generation_params, eos_token=eos_token, stopping_strings=[f"\n{name1}:", f"\n{name2}:"]):
             reply = cumulative_reply + reply
 
             # Extracting the reply
@@ -160,7 +165,12 @@ def chatbot_wrapper(text, max_new_tokens, generation_params, seed, name1, name2,
 
     yield shared.history['visible']
 
-def impersonate_wrapper(text, max_new_tokens, generation_params, seed, name1, name2, context, stop_at_newline, chat_prompt_size, chat_generation_attempts=1, mode="cai-chat", end_of_turn=""):
+def impersonate_wrapper(text, generation_params, name1, name2, context, mode="cai-chat", end_of_turn=""):
+    stop_at_newline = generation_params['stop_at_newline']
+    max_new_tokens = generation_params['max_new_tokens']
+    chat_prompt_size = generation_params['chat_prompt_size']
+    chat_generation_attempts = generation_params['chat_generation_attempts']
+
     eos_token = '\n' if stop_at_newline else None
 
     if 'pygmalion' in shared.model_name.lower():
@@ -174,7 +184,7 @@ def impersonate_wrapper(text, max_new_tokens, generation_params, seed, name1, na
     cumulative_reply = ''
     for i in range(chat_generation_attempts):
         reply = None
-        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", max_new_tokens, generation_params, seed, eos_token=eos_token, stopping_strings=[f"\n{name1}:", f"\n{name2}:"]):
+        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", generation_params, eos_token=eos_token, stopping_strings=[f"\n{name1}:", f"\n{name2}:"]):
             reply = cumulative_reply + reply
             reply, next_character_found = extract_message_from_reply(reply, name1, name2, stop_at_newline)
             yield reply
@@ -186,11 +196,11 @@ def impersonate_wrapper(text, max_new_tokens, generation_params, seed, name1, na
 
     yield reply
 
-def cai_chatbot_wrapper(text, max_new_tokens, generation_params, seed, name1, name2, context, stop_at_newline, chat_prompt_size, chat_generation_attempts=1, mode="cai-chat", end_of_turn=""):
-    for history in chatbot_wrapper(text, max_new_tokens, generation_params, seed, name1, name2, context, stop_at_newline, chat_prompt_size, chat_generation_attempts, regenerate=False, mode=mode, end_of_turn=end_of_turn):
+def cai_chatbot_wrapper(text, generation_params, name1, name2, context, mode="cai-chat", end_of_turn=""):
+    for history in chatbot_wrapper(text, generation_params, name1, name2, context, regenerate=False, mode=mode, end_of_turn=end_of_turn):
         yield chat_html_wrapper(history, name1, name2, mode)
 
-def regenerate_wrapper(text, max_new_tokens, generation_params, seed, name1, name2, context, stop_at_newline, chat_prompt_size, chat_generation_attempts=1, mode="cai-chat", end_of_turn=""):
+def regenerate_wrapper(text, generation_params, name1, name2, context, mode="cai-chat", end_of_turn=""):
     if (shared.character != 'None' and len(shared.history['visible']) == 1) or len(shared.history['internal']) == 0:
         yield chat_html_wrapper(shared.history['visible'], name1, name2, mode)
     else:
@@ -198,7 +208,7 @@ def regenerate_wrapper(text, max_new_tokens, generation_params, seed, name1, nam
         last_internal = shared.history['internal'].pop()
         # Yield '*Is typing...*'
         yield chat_html_wrapper(shared.history['visible']+[[last_visible[0], shared.processing_message]], name1, name2, mode)
-        for history in chatbot_wrapper(last_internal[0], max_new_tokens, generation_params, seed, name1, name2, context, stop_at_newline, chat_prompt_size, chat_generation_attempts, regenerate=True, mode=mode, end_of_turn=end_of_turn):
+        for history in chatbot_wrapper(last_internal[0], generation_params, name1, name2, context, regenerate=True, mode=mode, end_of_turn=end_of_turn):
             shared.history['visible'][-1] = [last_visible[0], history[-1][1]]
             yield chat_html_wrapper(shared.history['visible'], name1, name2, mode)
 

+ 3 - 1
modules/text_generation.py

@@ -102,7 +102,9 @@ def set_manual_seed(seed):
 def stop_everything_event():
     shared.stop_everything = True
 
-def generate_reply(question, max_new_tokens, generation_params, seed, eos_token=None, stopping_strings=[]):
+def generate_reply(question, generation_params, eos_token=None, stopping_strings=[]):
+    max_new_tokens = generation_params['max_new_tokens']
+    seed = generation_params['seed']
     print(generation_params)
     print('---------------')
     clear_torch_cache()
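
`generate_reply` follows the same pattern: `max_new_tokens` and `seed` are no longer separate arguments but are read from the dict. A hedged sketch of a call under the new signature (values are placeholders):

```python
# Hypothetical call: sampling settings, max_new_tokens and seed
# are all carried by a single generation_params dict.
generation_params = {'max_new_tokens': 200, 'seed': -1, 'temperature': 0.7}

for reply in generate_reply(
    "Write a haiku about autumn.",
    generation_params,
    eos_token=None,
    stopping_strings=[],
):
    print(reply)  # streamed partial output while generation runs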

+ 27 - 21
server.py

@@ -168,6 +168,8 @@ def create_prompt_menus():
 
 def create_settings_menus(default_preset):
     generate_params = load_preset_values(default_preset if not shared.args.flexgen else 'Naive')
+    for k in ['max_new_tokens', 'seed', 'stop_at_newline', 'chat_prompt_size', 'chat_generation_attempts']:
+        generate_params[k] = shared.settings[k]
     shared.gradio['generation_state'] = gr.State(generate_params)
 
     with gr.Row():
@@ -219,23 +221,6 @@ def create_settings_menus(default_preset):
         with gr.Row():
             shared.gradio['upload_softprompt'] = gr.File(type='binary', file_types=['.zip'])
 
-    def update_dict(_dict, k, v):
-        _dict[k] = v
-        return _dict
-
-    for k in ['do_sample', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping']:
-        if type(shared.gradio[k]) is gr.Checkbox:
-            shared.gradio[k].change(
-                lambda state, value, copy=k: update_dict(state, copy, value),
-                inputs=[shared.gradio['generation_state'], shared.gradio[k]],
-                outputs=shared.gradio['generation_state'],
-            )
-        else:
-            shared.gradio[k].release(
-                lambda state, value, copy=k: update_dict(state, copy, value),
-                inputs=[shared.gradio['generation_state'], shared.gradio[k]],
-                outputs=shared.gradio['generation_state'],
-            )
 
     shared.gradio['model_menu'].change(load_model_wrapper, [shared.gradio['model_menu']], [shared.gradio['model_menu']], show_progress=True)
     shared.gradio['preset_menu'].change(load_preset_values, [shared.gradio['preset_menu']], [shared.gradio[k] for k in ['generation_state']])
@@ -388,11 +373,11 @@ def create_interface():
                             shared.gradio['chat_prompt_size_slider'] = gr.Slider(minimum=shared.settings['chat_prompt_size_min'], maximum=shared.settings['chat_prompt_size_max'], step=1, label='Maximum prompt size in tokens', value=shared.settings['chat_prompt_size'])
                         with gr.Column():
                             shared.gradio['chat_generation_attempts'] = gr.Slider(minimum=shared.settings['chat_generation_attempts_min'], maximum=shared.settings['chat_generation_attempts_max'], value=shared.settings['chat_generation_attempts'], step=1, label='Generation attempts (for longer replies)')
-                            shared.gradio['check'] = gr.Checkbox(value=shared.settings['stop_at_newline'], label='Stop generating at new line character?')
+                            shared.gradio['stop_at_newline'] = gr.Checkbox(value=shared.settings['stop_at_newline'], label='Stop generating at new line character?')
 
                 create_settings_menus(default_preset)
 
-            shared.input_params = [shared.gradio[k] for k in ['Chat input', 'max_new_tokens', 'generation_state', 'seed', 'name1', 'name2', 'context', 'check', 'chat_prompt_size_slider', 'chat_generation_attempts', 'Chat mode', 'end_of_turn']]
+            shared.input_params = [shared.gradio[k] for k in ['Chat input', 'generation_state', 'name1', 'name2', 'context', 'Chat mode', 'end_of_turn']]
 
             def set_chat_input(textbox):
                 return textbox, ""
@@ -472,7 +457,7 @@ def create_interface():
             with gr.Tab("Parameters", elem_id="parameters"):
                 create_settings_menus(default_preset)
 
-            shared.input_params = [shared.gradio[k] for k in ['textbox', 'max_new_tokens', 'generation_state', 'seed']]
+            shared.input_params = [shared.gradio[k] for k in ['textbox', 'generation_state']]
             output_params = [shared.gradio[k] for k in ['textbox', 'markdown', 'html']]
             gen_events.append(shared.gradio['Generate'].click(generate_reply, shared.input_params, output_params, show_progress=shared.args.no_stream))
             gen_events.append(shared.gradio['textbox'].submit(generate_reply, shared.input_params, output_params, show_progress=shared.args.no_stream))
@@ -505,7 +490,7 @@ def create_interface():
             with gr.Tab("Parameters", elem_id="parameters"):
                 create_settings_menus(default_preset)
 
-            shared.input_params = [shared.gradio[k] for k in ['textbox', 'max_new_tokens', 'generation_state', 'seed']]
+            shared.input_params = [shared.gradio[k] for k in ['textbox', 'generation_state']]
             output_params = [shared.gradio[k] for k in ['output_textbox', 'markdown', 'html']]
             gen_events.append(shared.gradio['Generate'].click(generate_reply, shared.input_params, output_params, show_progress=shared.args.no_stream))
             gen_events.append(shared.gradio['textbox'].submit(generate_reply, shared.input_params, output_params, show_progress=shared.args.no_stream))
@@ -540,6 +525,27 @@ def create_interface():
         if shared.args.extensions is not None:
             extensions_module.create_extensions_block()
 
+        def update_dict(_dict, k, v):
+            _dict[k] = v
+            return _dict
+
+        print([k for k in shared.gradio])
+        for k in ['do_sample', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping', 'max_new_tokens', 'seed', 'stop_at_newline', 'chat_prompt_size', 'chat_generation_attempts']:
+            if k not in shared.gradio:
+                continue
+            if type(shared.gradio[k]) in [gr.Checkbox, gr.Number]:
+                shared.gradio[k].change(
+                    lambda state, value, copy=k: update_dict(state, copy, value),
+                    inputs=[shared.gradio['generation_state'], shared.gradio[k]],
+                    outputs=shared.gradio['generation_state'],
+                )
+            else:
+                shared.gradio[k].release(
+                    lambda state, value, copy=k: update_dict(state, copy, value),
+                    inputs=[shared.gradio['generation_state'], shared.gradio[k]],
+                    outputs=shared.gradio['generation_state'],
+                )
+
     # Authentication
     auth = None
     if shared.args.gradio_auth_path is not None:
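
The server.py change moves the event wiring to the end of `create_interface`, after all widgets exist, and extends it to the newly consolidated keys: every listed widget's `.change` (checkboxes, numbers) or `.release` (sliders) event writes its value into the `generation_state` `gr.State` dict, so generation callbacks only need that one dict as input. The following is a standalone sketch of the same pattern outside this codebase, with made-up widget names and defaults, not the project's actual UI:

```python
# Sketch: each widget change updates one shared gr.State dict,
# which is then the single input passed to generation callbacks.
import gradio as gr

def update_dict(_dict, k, v):
    _dict[k] = v
    return _dict

with gr.Blocks() as demo:
    state = gr.State({'temperature': 0.7, 'stop_at_newline': False})
    temperature = gr.Slider(0.01, 1.99, value=0.7, label='temperature')
    stop_at_newline = gr.Checkbox(value=False, label='Stop at newline')

    # Sliders use .release (fires when the user lets go of the handle);
    # checkboxes use .change (fires on every toggle). The default-argument
    # trick (k='temperature') binds the key name at definition time.
    temperature.release(
        lambda s, v, k='temperature': update_dict(s, k, v),
        inputs=[state, temperature], outputs=state,
    )
    stop_at_newline.change(
        lambda s, v, k='stop_at_newline': update_dict(s, k, v),
        inputs=[state, stop_at_newline], outputs=state,
    )

demo.launch()
```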