
Merge remote-tracking branch 'upstream/main'

Xan committed 2 years ago
33df4bd91f

+ 6 - 5
README.md

@@ -1,6 +1,6 @@
 # Text generation web UI
 
-A gradio web UI for running Large Language Models like GPT-J 6B, OPT, GALACTICA, GPT-Neo, and Pygmalion.
+A gradio web UI for running Large Language Models like GPT-J 6B, OPT, GALACTICA, LLaMA, and Pygmalion.
 
 Its goal is to become the [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) of text generation.
 
@@ -27,6 +27,7 @@ Its goal is to become the [AUTOMATIC1111/stable-diffusion-webui](https://github.
 * [FlexGen offload](https://github.com/oobabooga/text-generation-webui/wiki/FlexGen).
 * [DeepSpeed ZeRO-3 offload](https://github.com/oobabooga/text-generation-webui/wiki/DeepSpeed).
 * Get responses via API, [with](https://github.com/oobabooga/text-generation-webui/blob/main/api-example-streaming.py) or [without](https://github.com/oobabooga/text-generation-webui/blob/main/api-example.py) streaming.
+* [Supports the LLaMA model](https://github.com/oobabooga/text-generation-webui/wiki/LLaMA-model).
 * [Supports the RWKV model](https://github.com/oobabooga/text-generation-webui/wiki/RWKV-model).
 * Supports softprompts.
 * [Supports extensions](https://github.com/oobabooga/text-generation-webui/wiki/Extensions).
@@ -53,7 +54,7 @@ The third line assumes that you have an NVIDIA GPU.
 pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2
 ```
   	  
-* If you are running in CPU mode, replace the third command with this one:
+* If you are running it in CPU mode, replace the third command with this one:
 
 ```
 conda install pytorch torchvision torchaudio git -c pytorch
@@ -137,6 +138,7 @@ Optionally, you can use the following command-line flags:
 | `--cai-chat`  | Launch the web UI in chat mode with a style similar to Character.AI's. If the file `img_bot.png` or `img_bot.jpg` exists in the same folder as server.py, this image will be used as the bot's profile picture. Similarly, `img_me.png` or `img_me.jpg` will be used as your profile picture. |
 | `--cpu`       | Use the CPU to generate text.|
 | `--load-in-8bit`  | Load the model with 8-bit precision.|
+| `--load-in-4bit`  |  Load the model with 4-bit precision. Currently only works with LLaMA. |
 | `--bf16`  | Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU. |
 | `--auto-devices` | Automatically split the model across the available GPU(s) and CPU.|
 | `--disk` | If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk. |
@@ -187,8 +189,7 @@ For these two, please try commenting on an existing issue instead of creating a
 
 ## Credits
 
+- Gradio dropdown menu refresh button: https://github.com/AUTOMATIC1111/stable-diffusion-webui
+- Verbose preset: Anonymous 4chan user.
 - NovelAI and KoboldAI presets: https://github.com/KoboldAI/KoboldAI-Client/wiki/Settings-Presets
 - Pygmalion preset, code for early stopping in chat mode, code for some of the sliders, --chat mode colors: https://github.com/PygmalionAI/gradio-ui/
-- Verbose preset: Anonymous 4chan user.
-- Instruct-Joi preset: https://huggingface.co/Rallio67/joi_12B_instruct_alpha
-- Gradio dropdown menu refresh button: https://github.com/AUTOMATIC1111/stable-diffusion-webui

+ 14 - 6
download-model.py

@@ -5,7 +5,9 @@ Example:
 python download-model.py facebook/opt-1.3b
 
 '''
+
 import argparse
+import base64
 import json
 import multiprocessing
 import re
@@ -93,23 +95,28 @@ facebook/opt-1.3b
 def get_download_links_from_huggingface(model, branch):
     base = "https://huggingface.co"
     page = f"/api/models/{model}/tree/{branch}?cursor="
+    cursor = b""
 
     links = []
     classifications = []
     has_pytorch = False
     has_safetensors = False
-    while page is not None:
-        content = requests.get(f"{base}{page}").content
+    while True:
+        content = requests.get(f"{base}{page}{cursor.decode()}").content
+
         dict = json.loads(content)
+        if len(dict) == 0:
+            break
 
         for i in range(len(dict)):
             fname = dict[i]['path']
 
             is_pytorch = re.match("pytorch_model.*\.bin", fname)
             is_safetensors = re.match("model.*\.safetensors", fname)
-            is_text = re.match(".*\.(txt|json)", fname)
+            is_tokenizer = re.match("tokenizer.*\.model", fname)
+            is_text = re.match(".*\.(txt|json)", fname) or is_tokenizer
 
-            if is_text or is_safetensors or is_pytorch:
+            if any((is_pytorch, is_safetensors, is_text, is_tokenizer)):
                 if is_text:
                     links.append(f"https://huggingface.co/{model}/resolve/{branch}/{fname}")
                     classifications.append('text')
@@ -123,8 +130,9 @@ def get_download_links_from_huggingface(model, branch):
                         has_pytorch = True
                         classifications.append('pytorch')
 
-        #page = dict['nextUrl']
-        page = None
+        cursor = base64.b64encode(f'{{"file_name":"{dict[-1]["path"]}"}}'.encode()) + b':50'
+        cursor = base64.b64encode(cursor)
+        cursor = cursor.replace(b'=', b'%3D')
 
     # If both pytorch and safetensors are available, download safetensors only
     if has_pytorch and has_safetensors:
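
The change above replaces the single-request listing (the removed `nextUrl` line was never followed) with cursor-based pagination of the Hugging Face tree API, so repositories with more than one page of files — such as sharded checkpoints plus the newly included `tokenizer*.model` files — are fully enumerated. A minimal sketch of the same pagination, assuming the endpoint and 50-item page size shown in the diff:

```python
import base64

import requests

def list_model_files(model, branch="main"):
    """Walk the Hugging Face tree API page by page (50 files per page)."""
    base = "https://huggingface.co"
    cursor = b""
    files = []
    while True:
        response = requests.get(f"{base}/api/models/{model}/tree/{branch}?cursor={cursor.decode()}")
        page = response.json()
        if len(page) == 0:
            break
        files.extend(item["path"] for item in page)
        # Next cursor: the last file name wrapped in JSON, base64-encoded, ':50'
        # page size appended, base64-encoded again, and '=' escaped for the URL.
        cursor = base64.b64encode(f'{{"file_name":"{page[-1]["path"]}"}}'.encode()) + b':50'
        cursor = base64.b64encode(cursor).replace(b'=', b'%3D')
    return files

print(list_model_files("facebook/opt-1.3b"))
```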

+ 49 - 1
modules/models.py

@@ -1,5 +1,6 @@
 import json
 import os
+import sys
 import time
 import zipfile
 from pathlib import Path
@@ -41,7 +42,7 @@ def load_model(model_name):
     shared.is_RWKV = model_name.lower().startswith('rwkv-')
 
     # Default settings
-    if not (shared.args.cpu or shared.args.load_in_8bit or shared.args.auto_devices or shared.args.disk or shared.args.gpu_memory is not None or shared.args.cpu_memory is not None or shared.args.deepspeed or shared.args.flexgen or shared.is_RWKV):
+    if not (shared.args.cpu or shared.args.load_in_8bit or shared.args.load_in_4bit or shared.args.auto_devices or shared.args.disk or shared.args.gpu_memory is not None or shared.args.cpu_memory is not None or shared.args.deepspeed or shared.args.flexgen or shared.is_RWKV):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
         else:
@@ -86,6 +87,53 @@ def load_model(model_name):
 
         return model, tokenizer
 
+    # 4-bit LLaMA
+    elif shared.args.load_in_4bit:
+        sys.path.insert(0, os.path.abspath(Path("repositories/GPTQ-for-LLaMa")))
+
+        from llama import load_quant
+
+        path_to_model = Path(f'models/{model_name}')
+        pt_model = ''
+        if path_to_model.name.lower().startswith('llama-7b'):
+            pt_model = 'llama-7b-4bit.pt'
+        elif path_to_model.name.lower().startswith('llama-13b'):
+            pt_model = 'llama-13b-4bit.pt'
+        elif path_to_model.name.lower().startswith('llama-30b'):
+            pt_model = 'llama-30b-4bit.pt'
+        elif path_to_model.name.lower().startswith('llama-65b'):
+            pt_model = 'llama-65b-4bit.pt'
+        else:
+            pt_model = f'{model_name}-4bit.pt'
+
+        # Try to find the .pt both in models/ and in the subfolder
+        pt_path = None
+        for path in [Path(p) for p in [f"models/{pt_model}", f"{path_to_model}/{pt_model}"]]:
+            if path.exists():
+                pt_path = path
+
+        if not pt_path:
+            print(f"Could not find {pt_model}, exiting...")
+            exit()
+
+        model = load_quant(path_to_model, Path(f"models/{pt_model}"), 4)
+
+        # Multi-GPU setup
+        if shared.args.gpu_memory:
+            import accelerate
+
+            max_memory = {}
+            for i in range(len(shared.args.gpu_memory)):
+                max_memory[i] = f"{shared.args.gpu_memory[i]}GiB"
+            max_memory['cpu'] = f"{shared.args.cpu_memory or '99'}GiB"
+
+            device_map = accelerate.infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=["LLaMADecoderLayer"])
+            model = accelerate.dispatch_model(model, device_map=device_map)
+
+        # Single GPU
+        else:
+            model = model.to(torch.device('cuda:0'))
+
     # Custom
     else:
         command = "AutoModelForCausalLM.from_pretrained"
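
The new `--load-in-4bit` branch loads LLaMA through GPTQ-for-LLaMa's `load_quant` and expects two things under `models/`: a regular Hugging Face model folder and a pre-quantized checkpoint named `<model>-4bit.pt`, placed either directly in `models/` or inside the model folder; the server is then launched with `python server.py --load-in-4bit`. When `--gpu-memory` is also set, the loaded model is split across devices via `accelerate.infer_auto_device_map` and `dispatch_model`; otherwise it is moved to `cuda:0`. A small sketch of the checkpoint lookup, where `llama-7b` is only an example folder name:

```python
from pathlib import Path

model_name = 'llama-7b'          # example folder under models/
pt_model = 'llama-7b-4bit.pt'    # GPTQ-quantized weights, produced separately

# Both locations are checked; whichever exists is used.
candidates = [Path(f"models/{pt_model}"), Path(f"models/{model_name}/{pt_model}")]
pt_path = next((path for path in candidates if path.exists()), None)
if pt_path is None:
    raise FileNotFoundError(f"Could not find {pt_model}")
```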

+ 1 - 1
modules/shared.py

@@ -43,7 +43,6 @@ settings = {
         'default': 'NovelAI-Sphinx Moth',
         'pygmalion-*': 'Pygmalion',
         'RWKV-*': 'Naive',
-        '(rosey|chip|joi)_.*_instruct.*': 'Instruct Joi (Contrastive Search)'
     },
     'prompts': {
         'default': 'Common sense questions and answers\n\nQuestion: \nFactual answer:',
@@ -69,6 +68,7 @@ parser.add_argument('--chat', action='store_true', help='Launch the web UI in ch
 parser.add_argument('--cai-chat', action='store_true', help='Launch the web UI in chat mode with a style similar to Character.AI\'s. If the file img_bot.png or img_bot.jpg exists in the same folder as server.py, this image will be used as the bot\'s profile picture. Similarly, img_me.png or img_me.jpg will be used as your profile picture.')
 parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text.')
 parser.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision.')
+parser.add_argument('--load-in-4bit', action='store_true', help='Load the model with 4-bit precision. Currently only works with LLaMA.')
 parser.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
 parser.add_argument('--auto-devices', action='store_true', help='Automatically split the model across the available GPU(s) and CPU.')
 parser.add_argument('--disk', action='store_true', help='If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.')

+ 3 - 0
presets/Contrastive Search.txt

@@ -0,0 +1,3 @@
+do_sample=False
+penalty_alpha=0.6
+top_k=4
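
The new preset enables contrastive search. These `key=value` lines are read by the web UI as generation parameters; outside the UI, the equivalent transformers call looks roughly like the sketch below (`facebook/opt-1.3b` is only a placeholder model):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b")
model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b")

inputs = tokenizer("Common sense questions and answers\n\nQuestion:", return_tensors="pt")
# do_sample=False together with penalty_alpha > 0 and top_k > 1 is what
# selects contrastive search in transformers' generate().
output = model.generate(**inputs, do_sample=False, penalty_alpha=0.6, top_k=4, max_new_tokens=50)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```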

+ 0 - 5
presets/Instruct Joi (Contrastive Search).txt

@@ -1,5 +0,0 @@
-top_p=0.95
-temperature=0.5
-penalty_alpha=0.6
-top_k=4
-repetition_penalty=1.03

+ 1 - 1
requirements.txt

@@ -6,4 +6,4 @@ numpy
 rwkv==0.1.0
 safetensors==0.2.8
 sentencepiece
-git+https://github.com/oobabooga/transformers@llama_push
+git+https://github.com/zphang/transformers@llama_push

+ 5 - 4
server.py

@@ -37,7 +37,7 @@ def get_available_models():
     if shared.args.flexgen:
         return sorted([re.sub('-np$', '', item.name) for item in list(Path('models/').glob('*')) if item.name.endswith('-np')], key=str.lower)
     else:
-        return sorted([item.name for item in list(Path('models/').glob('*')) if not item.name.endswith(('.txt', '-np'))], key=str.lower)
+        return sorted([item.name for item in list(Path('models/').glob('*')) if not item.name.endswith(('.txt', '-np', '.pt'))], key=str.lower)
 
 def get_available_presets():
     return sorted(set(map(lambda x : '.'.join(str(x.name).split('.')[:-1]), Path('presets').glob('*.txt'))), key=str.lower)
@@ -197,11 +197,12 @@ shared.model, shared.tokenizer = load_model(shared.model_name)
 gen_events = []
 default_preset = shared.settings['presets'][next((k for k in shared.settings['presets'] if re.match(k.lower(), shared.model_name.lower())), 'default')]
 default_text = shared.settings['prompts'][next((k for k in shared.settings['prompts'] if re.match(k.lower(), shared.model_name.lower())), 'default')]
+title ='Text generation web UI'
 description = '\n\n# Text generation lab\nGenerate text using Large Language Models.\n'
 suffix = '_pygmalion' if 'pygmalion' in shared.model_name.lower() else ''
 
 if shared.args.chat or shared.args.cai_chat:
-    with gr.Blocks(css=ui.css+ui.chat_css, analytics_enabled=False) as shared.gradio['interface']:
+    with gr.Blocks(css=ui.css+ui.chat_css, analytics_enabled=False, title=title) as shared.gradio['interface']:
         if shared.args.cai_chat:
             shared.gradio['display'] = gr.HTML(value=generate_chat_html(shared.history['visible'], shared.settings[f'name1{suffix}'], shared.settings[f'name2{suffix}'], shared.character))
         else:
@@ -313,7 +314,7 @@ if shared.args.chat or shared.args.cai_chat:
         shared.gradio['interface'].load(reload_func, reload_inputs, [shared.gradio['display']], show_progress=True)
 
 elif shared.args.notebook:
-    with gr.Blocks(css=ui.css, analytics_enabled=False) as shared.gradio['interface']:
+    with gr.Blocks(css=ui.css, analytics_enabled=False, title=title) as shared.gradio['interface']:
         gr.Markdown(description)
         with gr.Tab('Raw'):
             shared.gradio['textbox'] = gr.Textbox(value=default_text, lines=23)
@@ -337,7 +338,7 @@ elif shared.args.notebook:
         shared.gradio['Stop'].click(None, None, None, cancels=gen_events)
 
 else:
-    with gr.Blocks(css=ui.css, analytics_enabled=False) as shared.gradio['interface']:
+    with gr.Blocks(css=ui.css, analytics_enabled=False, title=title) as shared.gradio['interface']:
         gr.Markdown(description)
         with gr.Row():
             with gr.Column():
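
Besides excluding `.pt` files from the model list, `server.py` now passes a `title` to every `gr.Blocks` call so the browser tab reads "Text generation web UI" instead of Gradio's default. A minimal, standalone sketch of that parameter:

```python
import gradio as gr

# Standalone sketch: title= on gr.Blocks sets the browser tab title of the page.
title = 'Text generation web UI'
with gr.Blocks(title=title) as demo:
    gr.Markdown('# Text generation lab\nGenerate text using Large Language Models.')

if __name__ == '__main__':
    demo.launch()
```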