Rename variables
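This commit renames the dictionary of UI generation settings that is passed into the chat wrappers and `generate_reply` from `generate_params` to `generate_state`, and renames the local dictionary of keyword arguments assembled for the actual model call from `updated_params` to `generate_params`. A minimal sketch of the resulting pattern, using hypothetical keys and a helper name of my own rather than the project's real signatures:

```python
# Sketch only: `generate_state` carries the UI settings end to end, while
# `generate_params` (formerly `updated_params`) is rebuilt locally as the
# kwargs that would eventually be passed to shared.model.generate().

def build_generate_params(generate_state):
    generate_params = {}
    for k in ['temperature', 'top_p', 'top_k']:          # subset of the real key list
        generate_params[k] = generate_state[k]
    generate_params['max_new_tokens'] = generate_state['max_new_tokens']
    return generate_params


if __name__ == '__main__':
    state = {'temperature': 0.7, 'top_p': 0.9, 'top_k': 40, 'max_new_tokens': 200}
    print(build_generate_params(state))
```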
@@ -96,9 +96,9 @@ def extract_message_from_reply(reply, name1, name2, stop_at_newline):
     reply = fix_newlines(reply)
     return reply, next_character_found
 
-def chatbot_wrapper(text, generate_params, name1, name2, context, mode, end_of_turn, regenerate=False):
+def chatbot_wrapper(text, generate_state, name1, name2, context, mode, end_of_turn, regenerate=False):
     just_started = True
-    eos_token = '\n' if generate_params['stop_at_newline'] else None
+    eos_token = '\n' if generate_state['stop_at_newline'] else None
     name1_original = name1
     if 'pygmalion' in shared.model_name.lower():
         name1 = "You"
@@ -119,9 +119,9 @@ def chatbot_wrapper(text, generate_params, name1, name2, context, mode, end_of_t
 
     kwargs = {'end_of_turn': end_of_turn, 'is_instruct': mode == 'instruct'}
     if custom_generate_chat_prompt is None:
-        prompt = generate_chat_prompt(text, generate_params['max_new_tokens'], name1, name2, context, generate_params['chat_prompt_size'], **kwargs)
+        prompt = generate_chat_prompt(text, generate_state['max_new_tokens'], name1, name2, context, generate_state['chat_prompt_size'], **kwargs)
     else:
-        prompt = custom_generate_chat_prompt(text, generate_params['max_new_tokens'], name1, name2, context, generate_params['chat_prompt_size'], **kwargs)
+        prompt = custom_generate_chat_prompt(text, generate_state['max_new_tokens'], name1, name2, context, generate_state['chat_prompt_size'], **kwargs)
 
     # Yield *Is typing...*
     if not regenerate:
@@ -129,13 +129,13 @@ def chatbot_wrapper(text, generate_params, name1, name2, context, mode, end_of_t
 
     # Generate
     cumulative_reply = ''
-    for i in range(generate_params['chat_generation_attempts']):
+    for i in range(generate_state['chat_generation_attempts']):
         reply = None
-        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", generate_params, eos_token=eos_token, stopping_strings=[f"\n{name1}:", f"\n{name2}:"]):
+        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", generate_state, eos_token=eos_token, stopping_strings=[f"\n{name1}:", f"\n{name2}:"]):
             reply = cumulative_reply + reply
 
             # Extracting the reply
-            reply, next_character_found = extract_message_from_reply(reply, name1, name2, generate_params['stop_at_newline'])
+            reply, next_character_found = extract_message_from_reply(reply, name1, name2, generate_state['stop_at_newline'])
             visible_reply = re.sub("(<USER>|<user>|{{user}})", name1_original, reply)
             visible_reply = apply_extensions(visible_reply, "output")
 
@@ -160,23 +160,23 @@ def chatbot_wrapper(text, generate_params, name1, name2, context, mode, end_of_t
 
     yield shared.history['visible']
 
-def impersonate_wrapper(text, generate_params, name1, name2, context, mode, end_of_turn):
-    eos_token = '\n' if generate_params['stop_at_newline'] else None
+def impersonate_wrapper(text, generate_state, name1, name2, context, mode, end_of_turn):
+    eos_token = '\n' if generate_state['stop_at_newline'] else None
 
     if 'pygmalion' in shared.model_name.lower():
         name1 = "You"
 
-    prompt = generate_chat_prompt(text, generate_params['max_new_tokens'], name1, name2, context, generate_params['chat_prompt_size'], impersonate=True, end_of_turn=end_of_turn)
+    prompt = generate_chat_prompt(text, generate_state['max_new_tokens'], name1, name2, context, generate_state['chat_prompt_size'], impersonate=True, end_of_turn=end_of_turn)
 
     # Yield *Is typing...*
     yield shared.processing_message
 
     cumulative_reply = ''
-    for i in range(generate_params['chat_generation_attempts']):
+    for i in range(generate_state['chat_generation_attempts']):
         reply = None
-        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", generate_params, eos_token=eos_token, stopping_strings=[f"\n{name1}:", f"\n{name2}:"]):
+        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", generate_state, eos_token=eos_token, stopping_strings=[f"\n{name1}:", f"\n{name2}:"]):
             reply = cumulative_reply + reply
-            reply, next_character_found = extract_message_from_reply(reply, name1, name2, generate_params['stop_at_newline'])
+            reply, next_character_found = extract_message_from_reply(reply, name1, name2, generate_state['stop_at_newline'])
             yield reply
             if next_character_found:
                 break
@@ -186,11 +186,11 @@ def impersonate_wrapper(text, generate_params, name1, name2, context, mode, end_
 
     yield reply
 
-def cai_chatbot_wrapper(text, generate_params, name1, name2, context, mode, end_of_turn):
-    for history in chatbot_wrapper(text, generate_params, name1, name2, context, mode, end_of_turn, regenerate=False):
+def cai_chatbot_wrapper(text, generate_state, name1, name2, context, mode, end_of_turn):
+    for history in chatbot_wrapper(text, generate_state, name1, name2, context, mode, end_of_turn, regenerate=False):
         yield chat_html_wrapper(history, name1, name2, mode)
 
-def regenerate_wrapper(text, generate_params, name1, name2, context, mode, end_of_turn):
+def regenerate_wrapper(text, generate_state, name1, name2, context, mode, end_of_turn):
     if (shared.character != 'None' and len(shared.history['visible']) == 1) or len(shared.history['internal']) == 0:
         yield chat_html_wrapper(shared.history['visible'], name1, name2, mode)
     else:
@@ -198,7 +198,7 @@ def regenerate_wrapper(text, generate_params, name1, name2, context, mode, end_o
         last_internal = shared.history['internal'].pop()
         # Yield '*Is typing...*'
         yield chat_html_wrapper(shared.history['visible']+[[last_visible[0], shared.processing_message]], name1, name2, mode)
-        for history in chatbot_wrapper(last_internal[0], generate_params, name1, name2, context, mode, end_of_turn, regenerate=True):
+        for history in chatbot_wrapper(last_internal[0], generate_state, name1, name2, context, mode, end_of_turn, regenerate=True):
            shared.history['visible'][-1] = [last_visible[0], history[-1][1]]
            yield chat_html_wrapper(shared.history['visible'], name1, name2, mode)
 
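The remaining hunks touch the text generation module. For orientation: the chat wrappers above only read a few UI-level keys from `generate_state` and thread the whole dictionary through to `generate_reply`, where the new `generate_params` kwargs dictionary is built. A self-contained sketch of that hand-off, with the wrapper simplified and `generate_reply` replaced by a stub (none of these names are the project's real signatures):

```python
# Illustrative only: the wrapper reads UI-level keys from generate_state and
# forwards the same dict to the reply generator (stubbed here).

def fake_generate_reply(prompt, generate_state, eos_token=None, stopping_strings=None):
    # Stand-in for the real streaming generator in the text generation module.
    yield f"{prompt} ... (up to {generate_state['max_new_tokens']} new tokens)"


def chatbot_wrapper_sketch(text, generate_state):
    eos_token = '\n' if generate_state['stop_at_newline'] else None
    for _ in range(generate_state['chat_generation_attempts']):
        for reply in fake_generate_reply(text, generate_state, eos_token=eos_token):
            yield reply


if __name__ == '__main__':
    state = {'stop_at_newline': False, 'chat_generation_attempts': 1, 'max_new_tokens': 200}
    for reply in chatbot_wrapper_sketch("Hello there", state):
        print(reply)
```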
@@ -102,11 +102,11 @@ def set_manual_seed(seed):
 def stop_everything_event():
     shared.stop_everything = True
 
-def generate_reply(question, generate_params, eos_token=None, stopping_strings=[]):
+def generate_reply(question, generate_state, eos_token=None, stopping_strings=[]):
     clear_torch_cache()
-    set_manual_seed(generate_params['seed'])
+    set_manual_seed(generate_state['seed'])
     shared.stop_everything = False
-    updated_params = {}
+    generate_params = {}
     t0 = time.time()
 
     original_question = question
@@ -119,11 +119,11 @@ def generate_reply(question, generate_params, eos_token=None, stopping_strings=[
     # separately and terminate the function call earlier
     if any((shared.is_RWKV, shared.is_llamacpp)):
         for k in ['temperature', 'top_p', 'top_k', 'repetition_penalty']:
-            updated_params[k] = generate_params[k]
-        updated_params["token_count"] = generate_params["max_new_tokens"]
+            generate_params[k] = generate_state[k]
+        generate_params["token_count"] = generate_state["max_new_tokens"]
         try:
             if shared.args.no_stream:
-                reply = shared.model.generate(context=question, **updated_params)
+                reply = shared.model.generate(context=question, **generate_params)
                 output = original_question+reply
                 if not shared.is_chat():
                     reply = original_question + apply_extensions(reply, "output")
@@ -134,7 +134,7 @@ def generate_reply(question, generate_params, eos_token=None, stopping_strings=[
 
                 # RWKV has proper streaming, which is very nice.
                 # No need to generate 8 tokens at a time.
-                for reply in shared.model.generate_with_streaming(context=question, **updated_params):
+                for reply in shared.model.generate_with_streaming(context=question, **generate_params):
                     output = original_question+reply
                     if not shared.is_chat():
                         reply = original_question + apply_extensions(reply, "output")
@@ -149,7 +149,7 @@ def generate_reply(question, generate_params, eos_token=None, stopping_strings=[
             print(f"Output generated in {(t1-t0):.2f} seconds ({new_tokens/(t1-t0):.2f} tokens/s, {new_tokens} tokens, context {original_tokens})")
             return
 
-    input_ids = encode(question, generate_params['max_new_tokens'])
+    input_ids = encode(question, generate_state['max_new_tokens'])
     original_input_ids = input_ids
     output = input_ids[0]
 
@@ -162,37 +162,37 @@ def generate_reply(question, generate_params, eos_token=None, stopping_strings=[
         t = [encode(string, 0, add_special_tokens=False) for string in stopping_strings]
         stopping_criteria_list.append(_SentinelTokenStoppingCriteria(sentinel_token_ids=t, starting_idx=len(input_ids[0])))
 
-    updated_params["max_new_tokens"] = generate_params['max_new_tokens']
+    generate_params["max_new_tokens"] = generate_state['max_new_tokens']
     if not shared.args.flexgen:
         for k in ["do_sample", "temperature", "top_p", "typical_p", "repetition_penalty", "encoder_repetition_penalty", "top_k", "min_length", "no_repeat_ngram_size", "num_beams", "penalty_alpha", "length_penalty", "early_stopping"]:
-            updated_params[k] = generate_params[k]
-        updated_params["eos_token_id"] = eos_token_ids
-        updated_params["stopping_criteria"] = stopping_criteria_list
+            generate_params[k] = generate_state[k]
+        generate_params["eos_token_id"] = eos_token_ids
+        generate_params["stopping_criteria"] = stopping_criteria_list
         if shared.args.no_stream:
-            updated_params["min_length"] = 0
+            generate_params["min_length"] = 0
     else:
         for k in ["do_sample", "temperature"]:
-            updated_params[k] = generate_params[k]
-        updated_params["stop"] = generate_params["eos_token_ids"][-1]
+            generate_params[k] = generate_state[k]
+        generate_params["stop"] = generate_state["eos_token_ids"][-1]
         if not shared.args.no_stream:
-            updated_params["max_new_tokens"] = 8
+            generate_params["max_new_tokens"] = 8
 
     if shared.args.no_cache:
-        updated_params.update({"use_cache": False})
+        generate_params.update({"use_cache": False})
     if shared.args.deepspeed:
-        updated_params.update({"synced_gpus": True})
+        generate_params.update({"synced_gpus": True})
     if shared.soft_prompt:
         inputs_embeds, filler_input_ids = generate_softprompt_input_tensors(input_ids)
-        updated_params.update({"inputs_embeds": inputs_embeds})
-        updated_params.update({"inputs": filler_input_ids})
+        generate_params.update({"inputs_embeds": inputs_embeds})
+        generate_params.update({"inputs": filler_input_ids})
     else:
-        updated_params.update({"inputs": input_ids})
+        generate_params.update({"inputs": input_ids})
 
     try:
         # Generate the entire reply at once.
         if shared.args.no_stream:
             with torch.no_grad():
-                output = shared.model.generate(**updated_params)[0]
+                output = shared.model.generate(**generate_params)[0]
                 if cuda:
                     output = output.cuda()
             if shared.soft_prompt:
@@ -220,7 +220,7 @@ def generate_reply(question, generate_params, eos_token=None, stopping_strings=[
 
             if not shared.is_chat():
                 yield formatted_outputs(original_question, shared.model_name)
-            with generate_with_streaming(**updated_params) as generator:
+            with generate_with_streaming(**generate_params) as generator:
                 for output in generator:
                     if shared.soft_prompt:
                         output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))
@@ -236,10 +236,10 @@ def generate_reply(question, generate_params, eos_token=None, stopping_strings=[
 
         # Stream the output naively for FlexGen since it doesn't support 'stopping_criteria'
         else:
-            for i in range(generate_params['max_new_tokens']//8+1):
+            for i in range(generate_state['max_new_tokens']//8+1):
                 clear_torch_cache()
                 with torch.no_grad():
-                    output = shared.model.generate(**updated_params)[0]
+                    output = shared.model.generate(**generate_params)[0]
                 if shared.soft_prompt:
                     output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))
 
@@ -255,10 +255,10 @@ def generate_reply(question, generate_params, eos_token=None, stopping_strings=[
                 input_ids = np.reshape(output, (1, output.shape[0]))
                 if shared.soft_prompt:
                     inputs_embeds, filler_input_ids = generate_softprompt_input_tensors(input_ids)
-                    updated_params.update({"inputs_embeds": inputs_embeds})
-                    updated_params.update({"inputs": filler_input_ids})
+                    generate_params.update({"inputs_embeds": inputs_embeds})
+                    generate_params.update({"inputs": filler_input_ids})
                 else:
-                    updated_params.update({"inputs": input_ids})
+                    generate_params.update({"inputs": input_ids})
 
             yield formatted_outputs(reply, shared.model_name)
 
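The last two hunks cover the FlexGen branch, which has no `stopping_criteria` support and therefore streams naively: the model is called repeatedly for small chunks of tokens, and each partial output is fed back in as the next call's input. A rough, self-contained sketch of that loop shape, with the model call stubbed out (the helper names below are illustrative, not the project's):

```python
# Rough sketch of naive chunked streaming: generate a few tokens per call,
# feed the running output back in, and yield after every chunk.

def fake_generate(input_ids, max_new_tokens):
    # Stand-in for shared.model.generate(**generate_params)[0].
    return input_ids + list(range(len(input_ids), len(input_ids) + max_new_tokens))


def naive_stream(input_ids, max_new_tokens, chunk=8):
    output = list(input_ids)
    for _ in range(max_new_tokens // chunk + 1):
        output = fake_generate(output, chunk)
        yield output  # the caller re-decodes and re-renders after each chunk


if __name__ == '__main__':
    for partial in naive_stream([101, 102, 103], max_new_tokens=16):
        print(partial)
```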