@@ -42,9 +42,10 @@ def load_model(model_name):
     t0 = time.time()
 
     shared.is_RWKV = 'rwkv-' in model_name.lower()
+    shared.is_llamacpp = model_name.lower().startswith('llamacpp-')
 
     # Default settings
-    if not any([shared.args.cpu, shared.args.load_in_8bit, shared.args.wbits, shared.args.auto_devices, shared.args.disk, shared.args.gpu_memory is not None, shared.args.cpu_memory is not None, shared.args.deepspeed, shared.args.flexgen, shared.is_RWKV]):
+    if not any([shared.args.cpu, shared.args.load_in_8bit, shared.args.wbits, shared.args.auto_devices, shared.args.disk, shared.args.gpu_memory is not None, shared.args.cpu_memory is not None, shared.args.deepspeed, shared.args.flexgen, shared.is_RWKV, shared.is_llamacpp]):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"{shared.args.model_dir}/{shared.model_name}"), device_map='auto', load_in_8bit=True)
         else:
@@ -100,6 +101,12 @@ def load_model(model_name):
 
         model = load_quantized(model_name)
 
+    # llama.cpp model
+    elif shared.is_llamacpp:
+        from modules.llamacpp_model import LlamaCppModel
+
+        model, tokenizer = LlamaCppModel.from_pretrained(Path(f'models/{model_name}/ggml-model-q4_0.bin'))
+        return model, tokenizer
+
     # Custom
     else:
         params = {"low_cpu_mem_usage": True}
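
For reference, here is a minimal sketch of what the LlamaCppModel wrapper imported above might look like. This is an illustration only, not the PR's actual modules/llamacpp_model.py: it assumes the llama-cpp-python bindings, and the generate method with its sampling defaults is a placeholder.

# Hypothetical sketch of modules/llamacpp_model.py, assuming the
# llama-cpp-python bindings; the PR's real implementation may differ.
from pathlib import Path

from llama_cpp import Llama


class LlamaCppModel:
    def __init__(self):
        self.model = None

    @classmethod
    def from_pretrained(cls, path: Path):
        # llama.cpp tokenizes internally, which is why load_model() can
        # unpack the same object as both model and tokenizer.
        result = cls()
        result.model = Llama(model_path=str(path))
        return result, result

    def generate(self, prompt, max_tokens=200, temperature=0.8):
        # The Llama object is callable and returns an OpenAI-style
        # completion dict.
        completion = self.model(prompt, max_tokens=max_tokens, temperature=temperature)
        return completion['choices'][0]['text']

Given the llamacpp- prefix check and the hard-coded filename, a quantized 7B model would live at, for example, models/llamacpp-7b/ggml-model-q4_0.bin (the directory name is the user's choice, as long as it starts with llamacpp-). Note the early return: this branch hands back the model and tokenizer immediately instead of falling through to the rest of load_model like the transformers code paths do.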