@@ -99,25 +99,37 @@ def set_manual_seed(seed):
         if torch.cuda.is_available():
             torch.cuda.manual_seed_all(seed)
 
-def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, encoder_repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, seed, eos_token=None, stopping_string=None):
+def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, encoder_repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, seed, eos_token=None, stopping_strings=[]):
     clear_torch_cache()
     set_manual_seed(seed)
     t0 = time.time()
 
+    original_question = question
+    if not (shared.args.chat or shared.args.cai_chat):
+        question = apply_extensions(question, "input")
+        if shared.args.verbose:
+            print(f"\n\n{question}\n--------------------\n")
+
     # These models are not part of Hugging Face, so we handle them
     # separately and terminate the function call earlier
     if shared.is_RWKV:
         try:
             if shared.args.no_stream:
                 reply = shared.model.generate(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k)
+                if not (shared.args.chat or shared.args.cai_chat):
+                    reply = original_question + apply_extensions(reply, "output")
                 yield formatted_outputs(reply, shared.model_name)
             else:
                 if not (shared.args.chat or shared.args.cai_chat):
                     yield formatted_outputs(question, shared.model_name)
+
                 # RWKV has proper streaming, which is very nice.
                 # No need to generate 8 tokens at a time.
                 for reply in shared.model.generate_with_streaming(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k):
+                    if not (shared.args.chat or shared.args.cai_chat):
+                        reply = original_question + apply_extensions(reply, "output")
                     yield formatted_outputs(reply, shared.model_name)
+
         except Exception:
             traceback.print_exc()
         finally:
@@ -127,12 +139,6 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
             print(f"Output generated in {(t1-t0):.2f} seconds ({(len(output)-len(input_ids[0]))/(t1-t0):.2f} tokens/s, {len(output)-len(input_ids[0])} tokens)")
             return
 
-    original_question = question
-    if not (shared.args.chat or shared.args.cai_chat):
-        question = apply_extensions(question, "input")
-        if shared.args.verbose:
-            print(f"\n\n{question}\n--------------------\n")
-
     input_ids = encode(question, max_new_tokens)
     original_input_ids = input_ids
     output = input_ids[0]
@@ -142,9 +148,8 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
     if eos_token is not None:
         eos_token_ids.append(int(encode(eos_token)[0][-1]))
     stopping_criteria_list = transformers.StoppingCriteriaList()
-    if stopping_string is not None:
-        # Copied from https://github.com/PygmalionAI/gradio-ui/blob/master/src/model.py
-        t = encode(stopping_string, 0, add_special_tokens=False)
+    if type(stopping_strings) is list and len(stopping_strings) > 0:
+        t = [encode(string, 0, add_special_tokens=False) for string in stopping_strings]
         stopping_criteria_list.append(_SentinelTokenStoppingCriteria(sentinel_token_ids=t, starting_idx=len(input_ids[0])))
 
     generate_params = {}
@@ -195,12 +200,10 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
         if shared.soft_prompt:
             output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))
 
+        new_tokens = len(output) - len(input_ids[0])
+        reply = decode(output[-new_tokens:])
         if not (shared.args.chat or shared.args.cai_chat):
-            new_tokens = len(output) - len(input_ids[0])
-            reply = decode(output[-new_tokens:])
             reply = original_question + apply_extensions(reply, "output")
-        else:
-            reply = decode(output)
 
         yield formatted_outputs(reply, shared.model_name)
 
@@ -223,12 +226,11 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
             for output in generator:
                 if shared.soft_prompt:
                     output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))
+
+                new_tokens = len(output) - len(input_ids[0])
+                reply = decode(output[-new_tokens:])
                 if not (shared.args.chat or shared.args.cai_chat):
-                    new_tokens = len(output) - len(input_ids[0])
-                    reply = decode(output[-new_tokens:])
                     reply = original_question + apply_extensions(reply, "output")
-                else:
-                    reply = decode(output)
 
                 if output[-1] in eos_token_ids:
                     break
@@ -244,12 +246,11 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
                 output = shared.model.generate(**generate_params)[0]
             if shared.soft_prompt:
                 output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))
+
+            new_tokens = len(output) - len(original_input_ids[0])
+            reply = decode(output[-new_tokens:])
             if not (shared.args.chat or shared.args.cai_chat):
-                new_tokens = len(output) - len(original_input_ids[0])
-                reply = decode(output[-new_tokens:])
                 reply = original_question + apply_extensions(reply, "output")
-            else:
-                reply = decode(output)
 
             if np.count_nonzero(np.isin(input_ids[0], eos_token_ids)) < np.count_nonzero(np.isin(output, eos_token_ids)):
                 break