@@ -44,7 +44,7 @@ def load_model(model_name):
     shared.is_RWKV = model_name.lower().startswith('rwkv-')

     # Default settings
-    if not any([shared.args.cpu, shared.args.load_in_8bit, shared.args.gptq_bits, shared.args.auto_devices, shared.args.disk, shared.args.gpu_memory is not None, shared.args.cpu_memory is not None, shared.args.deepspeed, shared.args.flexgen, shared.is_RWKV]):
+    if not any([shared.args.cpu, shared.args.load_in_8bit, shared.args.wbits, shared.args.auto_devices, shared.args.disk, shared.args.gpu_memory is not None, shared.args.cpu_memory is not None, shared.args.deepspeed, shared.args.flexgen, shared.is_RWKV]):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
         else:
@@ -95,7 +95,7 @@ def load_model(model_name):
         return model, tokenizer

     # Quantized model
-    elif shared.args.gptq_bits > 0:
+    elif shared.args.wbits > 0:
         from modules.GPTQ_loader import load_quantized

         model = load_quantized(model_name)
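
For context, here is a minimal standalone sketch of the dispatch logic this diff renames. It mirrors the flag checks visible in the two hunks, but the argparse scaffolding and the choose_loader helper are hypothetical illustrations, not part of the project's actual modules:

    # Sketch only: `wbits` and the other flag names come from the diff above;
    # everything else here is assumed for the sake of a runnable example.
    from argparse import Namespace

    args = Namespace(cpu=False, load_in_8bit=False, wbits=4,
                     auto_devices=False, disk=False, gpu_memory=None,
                     cpu_memory=None, deepspeed=False, flexgen=False)

    def choose_loader(args, is_RWKV=False):
        # As in the diff: the default HF path is taken only when no special
        # loading flag is set; a nonzero wbits routes to the GPTQ loader.
        if not any([args.cpu, args.load_in_8bit, args.wbits, args.auto_devices,
                    args.disk, args.gpu_memory is not None,
                    args.cpu_memory is not None, args.deepspeed, args.flexgen,
                    is_RWKV]):
            return 'default'
        elif args.wbits > 0:
            return 'gptq'
        return 'other'

    print(choose_loader(args))  # -> 'gptq'

Note that any nonzero wbits value both disqualifies the default path and selects the quantized loader, which is why a single rename of gptq_bits to wbits must touch both branches, as the diff does.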