
Several QoL changes related to LoRA

oobabooga 2 years ago
parent
commit
214dc6868e
3 changed files with 48 additions and 34 deletions:
  1. modules/shared.py (+5, -0)
  2. server.py (+7, -1)
  3. settings-template.json (+36, -33)

modules/shared.py (+5, -0)

@@ -53,6 +53,10 @@ settings = {
         '^(gpt4chan|gpt-4chan|4chan)': '-----\n--- 865467536\nInput text\n--- 865467537\n',
         '(rosey|chip|joi)_.*_instruct.*': 'User: \n',
         'oasst-*': '<|prompter|>Write a story about future of AI development<|endoftext|><|assistant|>'
+    },
+    'lora_prompts': {
+        'default': 'Common sense questions and answers\n\nQuestion: \nFactual answer:',
+        'alpaca-lora-7b': "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n### Instruction:\nWrite a Python script that generates text using the transformers library.\n### Response:\n"
     }
 }
 
@@ -68,6 +72,7 @@ def str2bool(v):
 
 parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog,max_help_position=54))
 parser.add_argument('--model', type=str, help='Name of the model to load by default.')
+parser.add_argument('--lora', type=str, help='Name of the LoRA to apply to the model by default.')
 parser.add_argument('--notebook', action='store_true', help='Launch the web UI in notebook mode, where the output is written to the same text box as the input.')
 parser.add_argument('--chat', action='store_true', help='Launch the web UI in chat mode.')
 parser.add_argument('--cai-chat', action='store_true', help='Launch the web UI in chat mode with a style similar to Character.AI\'s. If the file img_bot.png or img_bot.jpg exists in the same folder as server.py, this image will be used as the bot\'s profile picture. Similarly, img_me.png or img_me.jpg will be used as your profile picture.')
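
Taken together, the two additions to modules/shared.py let a LoRA be chosen at startup (--lora) and paired with a matching default prompt (lora_prompts). As with the existing prompts and presets mappings, the keys of lora_prompts are treated as regular expressions matched against the name, with 'default' as the fallback. A minimal, self-contained sketch of that lookup (the second prompt text is abbreviated here):

import argparse
import re

lora_prompts = {
    'default': 'Common sense questions and answers\n\nQuestion: \nFactual answer:',
    'alpaca-lora-7b': 'Below is an instruction that describes a task. ...',
}

parser = argparse.ArgumentParser()
parser.add_argument('--lora', type=str, help='Name of the LoRA to apply to the model by default.')
args = parser.parse_args(['--lora', 'alpaca-lora-7b'])

# Keys are regex patterns; fall back to 'default' when none of them match the LoRA name.
key = next((k for k in lora_prompts if re.match(k.lower(), args.lora.lower())), 'default')
print(lora_prompts[key])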

server.py (+7, -1)

@@ -225,10 +225,16 @@ else:
         print()
     shared.model_name = available_models[i]
 shared.model, shared.tokenizer = load_model(shared.model_name)
+if shared.args.lora:
+    shared.lora_name = shared.args.lora
+    print(f"Adding the LoRA {shared.lora_name} to the model...")
+    add_lora_to_model(shared.lora_name)
 
 # Default UI settings
 default_preset = shared.settings['presets'][next((k for k in shared.settings['presets'] if re.match(k.lower(), shared.model_name.lower())), 'default')]
-default_text = shared.settings['prompts'][next((k for k in shared.settings['prompts'] if re.match(k.lower(), shared.model_name.lower())), 'default')]
+default_text = shared.settings['lora_prompts'][next((k for k in shared.settings['lora_prompts'] if re.match(k.lower(), shared.lora_name.lower())), 'default')]
+if default_text == '':
+    default_text = shared.settings['prompts'][next((k for k in shared.settings['prompts'] if re.match(k.lower(), shared.model_name.lower())), 'default')]
 title ='Text generation web UI'
 description = '\n\n# Text generation lab\nGenerate text using Large Language Models.\n'
 suffix = '_pygmalion' if 'pygmalion' in shared.model_name.lower() else ''
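
The body of add_lora_to_model is not part of this diff; it lives elsewhere in the repository. For orientation only, here is a minimal standalone sketch of what attaching a LoRA adapter to an already-loaded model typically looks like with the peft library. The loras/<name> directory layout and the explicit model parameter are assumptions of this sketch, not the actual helper's signature (the real helper is called with just the LoRA name and works on shared.model):

from pathlib import Path
from peft import PeftModel

def add_lora_to_model(lora_name, model, lora_dir='loras'):
    # Assumed layout for this sketch: adapter_config.json and adapter weights under loras/<lora_name>.
    lora_path = Path(lora_dir) / lora_name
    # Wrap the base model with the adapter weights and return the wrapped model.
    return PeftModel.from_pretrained(model, str(lora_path))

Note also the prompt selection just below: the LoRA-keyed lora_prompts mapping is consulted first, and the model-keyed prompts mapping is only used as a fallback when the resolved LoRA prompt is an empty string.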

settings-template.json (+36, -33)

@@ -1,35 +1,38 @@
 {
-    "max_new_tokens": 200,
-    "max_new_tokens_min": 1,
-    "max_new_tokens_max": 2000,
-    "name1": "Person 1",
-    "name2": "Person 2",
-    "context": "This is a conversation between two people.",
-    "stop_at_newline": true,
-    "chat_prompt_size": 2048,
-    "chat_prompt_size_min": 0,
-    "chat_prompt_size_max": 2048,
-    "chat_generation_attempts": 1,
-    "chat_generation_attempts_min": 1,
-    "chat_generation_attempts_max": 5,
-    "name1_pygmalion": "You",
-    "name2_pygmalion": "Kawaii",
-    "context_pygmalion": "Kawaii's persona: Kawaii is a cheerful person who loves to make others smile. She is an optimist who loves to spread happiness and positivity wherever she goes.\n<START>",
-    "stop_at_newline_pygmalion": false,
-    "default_extensions": [],
-    "chat_default_extensions": [
-        "gallery"
-    ],
-    "presets": {
-        "default": "NovelAI-Sphinx Moth",
-        "pygmalion-*": "Pygmalion",
-        "RWKV-*": "Naive",
-        "(rosey|chip|joi)_.*_instruct.*": "Instruct Joi (Contrastive Search)"
-    },
-    "prompts": {
-        "default": "Common sense questions and answers\n\nQuestion: \nFactual answer:",
-        "^(gpt4chan|gpt-4chan|4chan)": "-----\n--- 865467536\nInput text\n--- 865467537\n",
-        "(rosey|chip|joi)_.*_instruct.*": "User: \n",
-        "oasst-*": "<|prompter|>Write a story about future of AI development<|endoftext|><|assistant|>"
-    }
+  "max_new_tokens": 200,
+  "max_new_tokens_min": 1,
+  "max_new_tokens_max": 2000,
+  "name1": "Person 1",
+  "name2": "Person 2",
+  "context": "This is a conversation between two people.",
+  "stop_at_newline": true,
+  "chat_prompt_size": 2048,
+  "chat_prompt_size_min": 0,
+  "chat_prompt_size_max": 2048,
+  "chat_generation_attempts": 1,
+  "chat_generation_attempts_min": 1,
+  "chat_generation_attempts_max": 5,
+  "name1_pygmalion": "You",
+  "name2_pygmalion": "Kawaii",
+  "context_pygmalion": "Kawaii's persona: Kawaii is a cheerful person who loves to make others smile. She is an optimist who loves to spread happiness and positivity wherever she goes.\n<START>",
+  "stop_at_newline_pygmalion": false,
+  "default_extensions": [],
+  "chat_default_extensions": [
+    "gallery"
+  ],
+  "presets": {
+    "default": "NovelAI-Sphinx Moth",
+    "pygmalion-*": "Pygmalion",
+    "RWKV-*": "Naive"
+  },
+  "prompts": {
+    "default": "Common sense questions and answers\n\nQuestion: \nFactual answer:",
+    "^(gpt4chan|gpt-4chan|4chan)": "-----\n--- 865467536\nInput text\n--- 865467537\n",
+    "(rosey|chip|joi)_.*_instruct.*": "User: \n",
+    "oasst-*": "<|prompter|>Write a story about future of AI development<|endoftext|><|assistant|>"
+  },
+  "lora_prompts": {
+    "default": "Common sense questions and answers\n\nQuestion: \nFactual answer:",
+    "alpaca-lora-7b": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n### Instruction:\nWrite a Python script that generates text using the transformers library.\n### Response:\n"
+  }
 }
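
Besides adding the lora_prompts block, the template was re-indented from four spaces to two, and the "(rosey|chip|joi)_.*_instruct.*" entry was dropped from presets. A quick sanity check that the regenerated JSON still parses and exposes the new mapping, assuming it is run from the repository root:

import json

with open('settings-template.json') as f:
    settings = json.load(f)

# The new mapping should sit alongside the existing prompt and preset maps.
assert 'lora_prompts' in settings
assert 'alpaca-lora-7b' in settings['lora_prompts']
print('settings-template.json parsed,', len(settings), 'top-level keys')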