
Re-implement --load-in-4bit and update --llama-bits arg description

draff 2 years ago
Parent · Commit 804486214b
3 changed files with 10 additions and 4 deletions
1. README.md (+2 −1)
2. modules/models.py (+6 −2)
3. modules/shared.py (+2 −1)

README.md (+2 −1)

@@ -138,7 +138,8 @@ Optionally, you can use the following command-line flags:
 | `--cai-chat`  | Launch the web UI in chat mode with a style similar to Character.AI's. If the file `img_bot.png` or `img_bot.jpg` exists in the same folder as server.py, this image will be used as the bot's profile picture. Similarly, `img_me.png` or `img_me.jpg` will be used as your profile picture. |
 | `--cpu`       | Use the CPU to generate text.|
 | `--load-in-8bit`  | Load the model with 8-bit precision.|
-| `--llama-bits`  |  Load LLaMA models with specified precision. 2, 3 and 4 bit are supported, use standard `--load-in-8bit` for 8bit precision. |
+| `--load-in-4bit`  | Load the model with 4-bit precision. Currently only works with LLaMA.|
+| `--llama-bits`  | Load pre-quantized models with the specified precision. 2, 3, 4 and 8-bit are supported. Currently only works with LLaMA. |
 | `--bf16`  | Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU. |
 | `--auto-devices` | Automatically split the model across the available GPU(s) and CPU.|
 | `--disk` | If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk. |

modules/models.py (+6 −2)

@@ -88,9 +88,13 @@ def load_model(model_name):
         return model, tokenizer
 
     # 4-bit LLaMA
-    elif shared.args.llama_bits>0:
+    elif shared.args.llama_bits > 0 or shared.args.load_in_4bit:
         sys.path.insert(0, os.path.abspath(Path("repositories/GPTQ-for-LLaMa")))
-        bits = shared.args.llama_bits
+        if shared.args.load_in_4bit:
+            bits = 4
+        else:
+            bits = shared.args.llama_bits
 
         from llama import load_quant
 

modules/shared.py (+2 −1)

@@ -67,7 +67,8 @@ parser.add_argument('--chat', action='store_true', help='Launch the web UI in ch
 parser.add_argument('--cai-chat', action='store_true', help='Launch the web UI in chat mode with a style similar to Character.AI\'s. If the file img_bot.png or img_bot.jpg exists in the same folder as server.py, this image will be used as the bot\'s profile picture. Similarly, img_me.png or img_me.jpg will be used as your profile picture.')
 parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text.')
 parser.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision.')
-parser.add_argument('--llama-bits', type=int, default=0, help='Load LLaMA models with specified precision. 2, 3 and 4 bit are supported, use standard `--load-in-8bit` for 8bit precision.')
+parser.add_argument('--load-in-4bit', action='store_true', help='Load the model with 4-bit precision. Currently only works with LLaMA.')
+parser.add_argument('--llama-bits', type=int, default=0, help='Load pre-quantized models with the specified precision. 2, 3, 4 and 8-bit are supported. Currently only works with LLaMA.')
 parser.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
 parser.add_argument('--auto-devices', action='store_true', help='Automatically split the model across the available GPU(s) and CPU.')
 parser.add_argument('--disk', action='store_true', help='If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.')
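
As a quick check of how the two new definitions parse, here is a stand-alone sketch using the same argparse pattern; the throwaway parser below is illustrative, not the real one from modules/shared.py.

```python
import argparse

# Reproduce just the two flag definitions added in this commit.
parser = argparse.ArgumentParser()
parser.add_argument('--load-in-4bit', action='store_true')
parser.add_argument('--llama-bits', type=int, default=0)

# --load-in-4bit is a plain boolean switch; dashes become underscores.
args = parser.parse_args(['--load-in-4bit'])
assert args.load_in_4bit is True and args.llama_bits == 0

# --llama-bits takes an integer and defaults to 0 (GPTQ disabled).
args = parser.parse_args(['--llama-bits', '2'])
assert args.load_in_4bit is False and args.llama_bits == 2
```

Note that nothing in the parser itself prevents passing both flags at once; in that case the branch in modules/models.py resolves the conflict in favour of 4-bit.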