oobabooga 3 лет назад
Родитель
Commit
c90310e40e
1 измененный файл: 1 добавление и 2 удаления
  1. 1 2
      server.py

+ 1 - 2
server.py

@@ -140,10 +140,10 @@ def generate_reply(question, tokens, inference_settings, selected_model, eos_tok
             preset = infile.read()
             preset = infile.read()
         loaded_preset = inference_settings
         loaded_preset = inference_settings
 
 
+    cuda = "" if args.cpu else ".cuda()"
     if not args.no_stream:
     if not args.no_stream:
         input_ids = encode(question, 1)
         input_ids = encode(question, 1)
         preset = preset.replace('max_new_tokens=tokens', 'max_new_tokens=1')
         preset = preset.replace('max_new_tokens=tokens', 'max_new_tokens=1')
-        cuda = "" if args.cpu else ".cuda()"
         for i in range(tokens):
         for i in range(tokens):
             output = eval(f"model.generate(input_ids, {preset}){cuda}")
             output = eval(f"model.generate(input_ids, {preset}){cuda}")
             reply = tokenizer.decode(output[0], skip_special_tokens=True)
             reply = tokenizer.decode(output[0], skip_special_tokens=True)
@@ -162,7 +162,6 @@ def generate_reply(question, tokens, inference_settings, selected_model, eos_tok
             input_ids = output
             input_ids = output
     else:
     else:
         input_ids = encode(question, tokens)
         input_ids = encode(question, tokens)
-        cuda = "" if args.cpu else ".cuda()"
         if eos_token is None:
         if eos_token is None:
             output = eval(f"model.generate(input_ids, {preset}){cuda}")
             output = eval(f"model.generate(input_ids, {preset}){cuda}")
         else:
         else: