Browse Source

Merge pull request #263 from HideLord/main

Fixing compatibility with GPTQ repository
oobabooga 2 years ago
parent
commit
901dcba9b4
1 changed file with 1 addition and 1 deletion
  1. 1 1
      modules/quantized_LLaMA.py

+ 1 - 1
modules/quantized_LLaMA.py

@@ -41,7 +41,7 @@ def load_quantized_LLaMA(model_name):
         print(f"Could not find {pt_model}, exiting...")
         exit()
 
-    model = load_quant(path_to_model, pt_path, bits)
+    model = load_quant(path_to_model, os.path.abspath(pt_path), bits)
 
     # Multi-GPU setup
     if shared.args.gpu_memory: