
Replace --load-in-4bit with --llama-bits

Replaces --load-in-4bit with a more flexible --llama-bits argument, allowing 2- and 3-bit models as well. This commit also fixes a loading issue with .pt files that are not in the root of the models folder.
draff 2 years ago
parent
commit
e6c631aea4
3 changed files with 11 additions and 10 deletions:
  1. README.md (+1 −1)
  2. modules/models.py (+9 −8)
  3. modules/shared.py (+1 −1)
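The commit message above describes two behaviors: a configurable bit width and a fix for .pt files living in the model's own subfolder. A condensed, hypothetical sketch of how they combine (the helper name `find_quantized_checkpoint` is illustrative and not part of the commit; the real change, including the special-cased llama-7b/13b/30b/65b filenames, is in the modules/models.py diff below):

```python
from pathlib import Path

# Hypothetical helper, not the commit's code: resolve the GPTQ .pt checkpoint
# for a model at the requested bit width (2, 3 or 4).
def find_quantized_checkpoint(model_name: str, bits: int):
    pt_model = f'{model_name}-{bits}bit.pt'  # e.g. 'llama-7b-4bit.pt'
    # Check models/ and, per the fix, the model's own subfolder too.
    for candidate in [Path(f'models/{pt_model}'), Path(f'models/{model_name}/{pt_model}')]:
        if candidate.exists():
            return candidate
    return None
```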

+ 1 - 1
README.md

@@ -138,7 +138,7 @@ Optionally, you can use the following command-line flags:
 | `--cai-chat`  | Launch the web UI in chat mode with a style similar to Character.AI's. If the file `img_bot.png` or `img_bot.jpg` exists in the same folder as server.py, this image will be used as the bot's profile picture. Similarly, `img_me.png` or `img_me.jpg` will be used as your profile picture. |
 | `--cpu`       | Use the CPU to generate text.|
 | `--load-in-8bit`  | Load the model with 8-bit precision.|
-| `--load-in-4bit`  |  Load the model with 4-bit precision. Currently only works with LLaMA. |
+| `--llama-bits`  |  Load LLaMA models with the specified precision. 2, 3 and 4-bit are supported; use the standard `--load-in-8bit` for 8-bit precision. |
 | `--bf16`  | Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU. |
 | `--auto-devices` | Automatically split the model across the available GPU(s) and CPU.|
 | `--disk` | If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk. |

+ 9 - 8
modules/models.py

@@ -42,7 +42,7 @@ def load_model(model_name):
     shared.is_RWKV = model_name.lower().startswith('rwkv-')

     # Default settings
-    if not (shared.args.cpu or shared.args.load_in_8bit or shared.args.load_in_4bit or shared.args.auto_devices or shared.args.disk or shared.args.gpu_memory is not None or shared.args.cpu_memory is not None or shared.args.deepspeed or shared.args.flexgen or shared.is_RWKV):
+    if not (shared.args.cpu or shared.args.load_in_8bit or shared.args.llama_bits>0 or shared.args.auto_devices or shared.args.disk or shared.args.gpu_memory is not None or shared.args.cpu_memory is not None or shared.args.deepspeed or shared.args.flexgen or shared.is_RWKV):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
         else:
@@ -88,23 +88,24 @@ def load_model(model_name):
         return model, tokenizer

     # 4-bit LLaMA
-    elif shared.args.load_in_4bit:
+    elif shared.args.llama_bits>0:
         sys.path.insert(0, os.path.abspath(Path("repositories/GPTQ-for-LLaMa")))
+        bits = shared.args.llama_bits

         from llama import load_quant

         path_to_model = Path(f'models/{model_name}')
         pt_model = ''
         if path_to_model.name.lower().startswith('llama-7b'):
-            pt_model = 'llama-7b-4bit.pt'
+            pt_model = f'llama-7b-{bits}bit.pt'
         elif path_to_model.name.lower().startswith('llama-13b'):
-            pt_model = 'llama-13b-4bit.pt'
+            pt_model = f'llama-13b-{bits}bit.pt'
         elif path_to_model.name.lower().startswith('llama-30b'):
-            pt_model = 'llama-30b-4bit.pt'
+            pt_model = f'llama-30b-{bits}bit.pt'
         elif path_to_model.name.lower().startswith('llama-65b'):
-            pt_model = 'llama-65b-4bit.pt'
+            pt_model = f'llama-65b-{bits}bit.pt'
         else:
-            pt_model = f'{model_name}-4bit.pt'
+            pt_model = f'{model_name}-{bits}bit.pt'

         # Try to find the .pt both in models/ and in the subfolder
         pt_path = None
@@ -116,7 +117,7 @@ def load_model(model_name):
             print(f"Could not find {pt_model}, exiting...")
             print(f"Could not find {pt_model}, exiting...")
             exit()
             exit()
 
 
-        model = load_quant(path_to_model, Path(f"models/{pt_model}"), 4)
+        model = load_quant(path_to_model, Path(f"{pt_path}"), bits)
 
 
         # Multi-GPU setup
         # Multi-GPU setup
         if shared.args.gpu_memory:
         if shared.args.gpu_memory:
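The hunk above consumes `pt_path` and `bits`, but the loop that sets `pt_path` lies outside the diff context; only the comment and `pt_path = None` are visible. A hedged reconstruction of that step, reusing the names from the hunks above (`pt_model`, `model_name`, `path_to_model`, `load_quant`):

```python
# Sketch based on the visible context lines, not the commit's verbatim code:
# try the root of models/ first, then the model's own subfolder.
pt_path = None
for path in [Path(f'models/{pt_model}'), Path(f'models/{model_name}/{pt_model}')]:
    if path.exists():
        pt_path = path

if pt_path is None:
    print(f"Could not find {pt_model}, exiting...")
    exit()

model = load_quant(path_to_model, Path(f"{pt_path}"), bits)
```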

+ 1 - 1
modules/shared.py

@@ -67,7 +67,7 @@ parser.add_argument('--chat', action='store_true', help='Launch the web UI in ch
 parser.add_argument('--cai-chat', action='store_true', help='Launch the web UI in chat mode with a style similar to Character.AI\'s. If the file img_bot.png or img_bot.jpg exists in the same folder as server.py, this image will be used as the bot\'s profile picture. Similarly, img_me.png or img_me.jpg will be used as your profile picture.')
 parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text.')
 parser.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision.')
-parser.add_argument('--load-in-4bit', action='store_true', help='Load the model with 4-bit precision. Currently only works with LLaMA.')
+parser.add_argument('--llama-bits', type=int, default=0, help='Load LLaMA models with the specified precision. 2, 3 and 4-bit are supported; use the standard `--load-in-8bit` for 8-bit precision.')
 parser.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
 parser.add_argument('--auto-devices', action='store_true', help='Automatically split the model across the available GPU(s) and CPU.')
 parser.add_argument('--disk', action='store_true', help='If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.')
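As a standalone illustration of the new flag's contract (a minimal sketch; the default of 0 meaning "quantized loading not requested" matches the `llama_bits>0` guards in modules/models.py):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--llama-bits', type=int, default=0,
                    help='Load LLaMA models with the specified precision. '
                         '2, 3 and 4-bit are supported; use the standard '
                         '--load-in-8bit for 8-bit precision.')

# Equivalent of: python server.py --model llama-7b --llama-bits 4
args = parser.parse_args(['--llama-bits', '4'])
use_gptq = args.llama_bits > 0    # gates the GPTQ-for-LLaMa branch
print(args.llama_bits, use_gptq)  # -> 4 True
```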