@@ -15,6 +15,7 @@ import modules.extensions as extensions_module
 import modules.shared as shared
 import modules.ui as ui
 from modules.html_generator import generate_chat_html
+from modules.LoRA import add_lora_to_model
 from modules.models import load_model, load_soft_prompt
 from modules.text_generation import generate_reply
 
@@ -48,6 +49,9 @@ def get_available_extensions():
 def get_available_softprompts():
     return ['None'] + sorted(set(map(lambda x : '.'.join(str(x.name).split('.')[:-1]), Path('softprompts').glob('*.zip'))), key=str.lower)
 
+def get_available_loras():
+    return ['None'] + sorted([item.name for item in list(Path('loras/').glob('*')) if not item.name.endswith(('.txt', '-np', '.pt', '.json'))], key=str.lower)
+
 def load_model_wrapper(selected_model):
     if selected_model != shared.model_name:
         shared.model_name = selected_model
@@ -59,6 +63,17 @@ def load_model_wrapper(selected_model):
 
     return selected_model
 
+def load_lora_wrapper(selected_lora):
+    shared.lora_name = selected_lora
+    default_text = shared.settings['lora_prompts'][next((k for k in shared.settings['lora_prompts'] if re.match(k.lower(), shared.lora_name.lower())), 'default')]
+
+    if not shared.args.cpu:
+        gc.collect()
+        torch.cuda.empty_cache()
+    add_lora_to_model(selected_lora)
+
+    return selected_lora, default_text
+
 def load_preset_values(preset_menu, return_dict=False):
     generate_params = {
         'do_sample': True,
@@ -145,6 +160,10 @@ def create_settings_menus(default_preset):
                 shared.gradio['length_penalty'] = gr.Slider(-5, 5, value=generate_params['length_penalty'], label='length_penalty')
                 shared.gradio['early_stopping'] = gr.Checkbox(value=generate_params['early_stopping'], label='early_stopping')
 
+    with gr.Row():
+        shared.gradio['lora_menu'] = gr.Dropdown(choices=available_loras, value=shared.lora_name, label='LoRA')
+        ui.create_refresh_button(shared.gradio['lora_menu'], lambda : None, lambda : {'choices': get_available_loras()}, 'refresh-button')
+
     with gr.Accordion('Soft prompt', open=False):
         with gr.Row():
             shared.gradio['softprompts_menu'] = gr.Dropdown(choices=available_softprompts, value='None', label='Soft prompt')
@@ -156,6 +175,7 @@ def create_settings_menus(default_preset):
 
     shared.gradio['model_menu'].change(load_model_wrapper, [shared.gradio['model_menu']], [shared.gradio['model_menu']], show_progress=True)
     shared.gradio['preset_menu'].change(load_preset_values, [shared.gradio['preset_menu']], [shared.gradio['do_sample'], shared.gradio['temperature'], shared.gradio['top_p'], shared.gradio['typical_p'], shared.gradio['repetition_penalty'], shared.gradio['encoder_repetition_penalty'], shared.gradio['top_k'], shared.gradio['min_length'], shared.gradio['no_repeat_ngram_size'], shared.gradio['num_beams'], shared.gradio['penalty_alpha'], shared.gradio['length_penalty'], shared.gradio['early_stopping']])
+    shared.gradio['lora_menu'].change(load_lora_wrapper, [shared.gradio['lora_menu']], [shared.gradio['lora_menu'], shared.gradio['textbox']], show_progress=True)
     shared.gradio['softprompts_menu'].change(load_soft_prompt, [shared.gradio['softprompts_menu']], [shared.gradio['softprompts_menu']], show_progress=True)
     shared.gradio['upload_softprompt'].upload(upload_soft_prompt, [shared.gradio['upload_softprompt']], [shared.gradio['softprompts_menu']])
 
@@ -181,6 +201,7 @@ available_models = get_available_models()
 available_presets = get_available_presets()
 available_characters = get_available_characters()
 available_softprompts = get_available_softprompts()
+available_loras = get_available_loras()
 
 # Default extensions
 extensions_module.available_extensions = get_available_extensions()
@@ -213,10 +234,16 @@ else:
     print()
     shared.model_name = available_models[i]
 shared.model, shared.tokenizer = load_model(shared.model_name)
+if shared.args.lora:
+    print(shared.args.lora)
+    shared.lora_name = shared.args.lora
+    add_lora_to_model(shared.lora_name)
 
 # Default UI settings
 default_preset = shared.settings['presets'][next((k for k in shared.settings['presets'] if re.match(k.lower(), shared.model_name.lower())), 'default')]
-default_text = shared.settings['prompts'][next((k for k in shared.settings['prompts'] if re.match(k.lower(), shared.model_name.lower())), 'default')]
+default_text = shared.settings['lora_prompts'][next((k for k in shared.settings['lora_prompts'] if re.match(k.lower(), shared.lora_name.lower())), 'default')]
+if default_text == '':
+    default_text = shared.settings['prompts'][next((k for k in shared.settings['prompts'] if re.match(k.lower(), shared.model_name.lower())), 'default')]
 title ='Text generation web UI'
 description = '\n\n# Text generation lab\nGenerate text using Large Language Models.\n'
 suffix = '_pygmalion' if 'pygmalion' in shared.model_name.lower() else ''
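
Note: the first hunk imports add_lora_to_model from modules/LoRA.py, which is not included in this diff. A minimal sketch of what that helper could look like, assuming the PEFT library and the shared/models module layout used above (the real module may handle unloading and device placement differently):

# Hypothetical sketch of modules/LoRA.py -- an assumption, not part of this diff.
from pathlib import Path

import modules.shared as shared
from modules.models import load_model


def add_lora_to_model(lora_name):
    from peft import PeftModel  # assumed dependency

    if lora_name == 'None':
        # Reload the base model to detach any previously applied LoRA.
        shared.model, shared.tokenizer = load_model(shared.model_name)
    else:
        # Wrap the loaded base model with the adapter weights in loras/<name>.
        shared.model = PeftModel.from_pretrained(shared.model, Path(f'loras/{lora_name}'))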
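
load_lora_wrapper and the final hunk both read shared.settings['lora_prompts'], a table whose keys are regular expressions matched against the LoRA name via re.match, with 'default' as the fallback key; per the last hunk, an empty string falls through to the per-model 'prompts' table. A hypothetical entry (the key and prompt text below are illustrative, not taken from the diff):

# Illustrative only: the regex key and prompt text are assumptions.
lora_prompts = {
    'default': '',  # empty -> fall back to the per-model 'prompts' table
    '(alpaca-lora)': ('Below is an instruction that describes a task. '
                      'Write a response that appropriately completes the request.\n'
                      '### Instruction:\n\n### Response:\n'),
}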