
Don't use low_cpu_mem_usage and device_map together

oobabooga, 3 years ago
Parent
Current commit
bb77f20a6c
1 file changed, 3 insertions and 2 deletions
  1. server.py  +3 −2

server.py  +3 −2

@@ -87,10 +87,11 @@ def load_model(model_name):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.float16).cuda()
     # Custom
     else:
-        settings = ["low_cpu_mem_usage=True"]
         command = "AutoModelForCausalLM.from_pretrained"
+        settings = []

         if args.cpu:
+            settings.append("low_cpu_mem_usage=True")
             settings.append("torch_dtype=torch.float32")
         else:
             settings.append("device_map='auto'")
@@ -374,7 +375,7 @@ if args.chat or args.cai_chat:
             reply = reply[idx + 1 + len(apply_extensions(f"{current}:", "bot_prefix")):]
         else:
             reply = reply[idx + 1 + len(f"{current}:"):]
-        
+

         if check:
             reply = reply.split('\n')[0].strip()
         else:
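
The effect of the first hunk: `low_cpu_mem_usage=True` is now only appended on the CPU path, while the GPU path keeps `device_map='auto'`, so the two options are never combined in the generated `from_pretrained` call. Below is a minimal sketch of that logic; only the `settings` construction mirrors the diff, and the `load_custom` wrapper plus the eval-style call assembly at the end are illustrative assumptions, not the verbatim server.py code.

```python
# Minimal sketch of the custom-loading branch after this commit.
# Only the settings list construction mirrors the diff; the final call
# assembly is an assumption about how `settings` is consumed.
from pathlib import Path

import torch
from transformers import AutoModelForCausalLM


def load_custom(model_name, cpu=False):
    settings = []
    if cpu:
        # CPU path: device_map is not used here, so low_cpu_mem_usage=True
        # can be enabled without conflicting with it.
        settings.append("low_cpu_mem_usage=True")
        settings.append("torch_dtype=torch.float32")
    else:
        # GPU path: device_map='auto' already handles memory-efficient
        # placement, so low_cpu_mem_usage is no longer passed alongside it.
        settings.append("device_map='auto'")

    # Hypothetical assembly of the final call (not part of this diff):
    command = (
        f"AutoModelForCausalLM.from_pretrained("
        f"Path('models/{model_name}'), {', '.join(settings)})"
    )
    return eval(command)
```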