Browse Source

Allow loading of .safetensors through GPTQ-for-LLaMa

EyeDeck 2 years ago
parent
commit
dcfd866402
1 changed file with 9 additions and 7 deletions
  1. 9 7
      modules/GPTQ_loader.py

+ 9 - 7
modules/GPTQ_loader.py

@@ -37,21 +37,23 @@ def load_quantized(model_name):
 
     path_to_model = Path(f'models/{model_name}')
     if path_to_model.name.lower().startswith('llama-7b'):
-        pt_model = f'llama-7b-{shared.args.gptq_bits}bit.pt'
+        pt_model = f'llama-7b-{shared.args.gptq_bits}bit'
     elif path_to_model.name.lower().startswith('llama-13b'):
-        pt_model = f'llama-13b-{shared.args.gptq_bits}bit.pt'
+        pt_model = f'llama-13b-{shared.args.gptq_bits}bit'
     elif path_to_model.name.lower().startswith('llama-30b'):
-        pt_model = f'llama-30b-{shared.args.gptq_bits}bit.pt'
+        pt_model = f'llama-30b-{shared.args.gptq_bits}bit'
     elif path_to_model.name.lower().startswith('llama-65b'):
-        pt_model = f'llama-65b-{shared.args.gptq_bits}bit.pt'
+        pt_model = f'llama-65b-{shared.args.gptq_bits}bit'
     else:
-        pt_model = f'{model_name}-{shared.args.gptq_bits}bit.pt'
+        pt_model = f'{model_name}-{shared.args.gptq_bits}bit'
 
-    # Try to find the .pt both in models/ and in the subfolder
+    # Try to find the .safetensors or .pt both in models/ and in the subfolder
     pt_path = None
-    for path in [Path(p) for p in [f"models/{pt_model}", f"{path_to_model}/{pt_model}"]]:
+    for path in [Path(p+ext) for ext in ['.safetensors', '.pt'] for p in [f"models/{pt_model}", f"{path_to_model}/{pt_model}"]]:
         if path.exists():
+            print(f"Found {path}")
             pt_path = path
+            break
 
     if not pt_path:
         print(f"Could not find {pt_model}, exiting...")