Rename a variable

oobabooga
2023-04-05 22:32:52 -03:00
parent cfdbc8bd23
commit 77232fa68e
2 changed files with 25 additions and 25 deletions
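
Summary: a mechanical rename of the dict argument generation_params to generate_params across the chat wrappers (chatbot_wrapper, impersonate_wrapper, cai_chatbot_wrapper, regenerate_wrapper) and generate_reply. No behavior changes.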

modules/chat.py

@@ -96,11 +96,11 @@ def extract_message_from_reply(reply, name1, name2, stop_at_newline):
     reply = fix_newlines(reply)
     return reply, next_character_found
 
-def chatbot_wrapper(text, generation_params, name1, name2, context, regenerate=False, mode="cai-chat", end_of_turn=""):
-    stop_at_newline = generation_params['stop_at_newline']
-    max_new_tokens = generation_params['max_new_tokens']
-    chat_prompt_size = generation_params['chat_prompt_size']
-    chat_generation_attempts = generation_params['chat_generation_attempts']
+def chatbot_wrapper(text, generate_params, name1, name2, context, regenerate=False, mode="cai-chat", end_of_turn=""):
+    stop_at_newline = generate_params['stop_at_newline']
+    max_new_tokens = generate_params['max_new_tokens']
+    chat_prompt_size = generate_params['chat_prompt_size']
+    chat_generation_attempts = generate_params['chat_generation_attempts']
     just_started = True
     eos_token = '\n' if stop_at_newline else None
@@ -136,7 +136,7 @@ def chatbot_wrapper(text, generation_params, name1, name2, context, regenerate=F
     cumulative_reply = ''
     for i in range(chat_generation_attempts):
         reply = None
-        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", generation_params, eos_token=eos_token, stopping_strings=[f"\n{name1}:", f"\n{name2}:"]):
+        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", generate_params, eos_token=eos_token, stopping_strings=[f"\n{name1}:", f"\n{name2}:"]):
             reply = cumulative_reply + reply
 
             # Extracting the reply
@@ -165,11 +165,11 @@ def chatbot_wrapper(text, generation_params, name1, name2, context, regenerate=F
     yield shared.history['visible']
 
-def impersonate_wrapper(text, generation_params, name1, name2, context, mode="cai-chat", end_of_turn=""):
-    stop_at_newline = generation_params['stop_at_newline']
-    max_new_tokens = generation_params['max_new_tokens']
-    chat_prompt_size = generation_params['chat_prompt_size']
-    chat_generation_attempts = generation_params['chat_generation_attempts']
+def impersonate_wrapper(text, generate_params, name1, name2, context, mode="cai-chat", end_of_turn=""):
+    stop_at_newline = generate_params['stop_at_newline']
+    max_new_tokens = generate_params['max_new_tokens']
+    chat_prompt_size = generate_params['chat_prompt_size']
+    chat_generation_attempts = generate_params['chat_generation_attempts']
 
     eos_token = '\n' if stop_at_newline else None
@@ -184,7 +184,7 @@ def impersonate_wrapper(text, generation_params, name1, name2, context, mode="ca
     cumulative_reply = ''
     for i in range(chat_generation_attempts):
         reply = None
-        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", generation_params, eos_token=eos_token, stopping_strings=[f"\n{name1}:", f"\n{name2}:"]):
+        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", generate_params, eos_token=eos_token, stopping_strings=[f"\n{name1}:", f"\n{name2}:"]):
             reply = cumulative_reply + reply
             reply, next_character_found = extract_message_from_reply(reply, name1, name2, stop_at_newline)
             yield reply
@@ -196,11 +196,11 @@ def impersonate_wrapper(text, generation_params, name1, name2, context, mode="ca
     yield reply
 
-def cai_chatbot_wrapper(text, generation_params, name1, name2, context, mode="cai-chat", end_of_turn=""):
-    for history in chatbot_wrapper(text, generation_params, name1, name2, context, regenerate=False, mode=mode, end_of_turn=end_of_turn):
+def cai_chatbot_wrapper(text, generate_params, name1, name2, context, mode="cai-chat", end_of_turn=""):
+    for history in chatbot_wrapper(text, generate_params, name1, name2, context, regenerate=False, mode=mode, end_of_turn=end_of_turn):
         yield chat_html_wrapper(history, name1, name2, mode)
 
-def regenerate_wrapper(text, generation_params, name1, name2, context, mode="cai-chat", end_of_turn=""):
+def regenerate_wrapper(text, generate_params, name1, name2, context, mode="cai-chat", end_of_turn=""):
     if (shared.character != 'None' and len(shared.history['visible']) == 1) or len(shared.history['internal']) == 0:
         yield chat_html_wrapper(shared.history['visible'], name1, name2, mode)
     else:
@@ -208,7 +208,7 @@ def regenerate_wrapper(text, generation_params, name1, name2, context, mode="cai
         last_internal = shared.history['internal'].pop()
         # Yield '*Is typing...*'
         yield chat_html_wrapper(shared.history['visible']+[[last_visible[0], shared.processing_message]], name1, name2, mode)
-        for history in chatbot_wrapper(last_internal[0], generation_params, name1, name2, context, regenerate=True, mode=mode, end_of_turn=end_of_turn):
+        for history in chatbot_wrapper(last_internal[0], generate_params, name1, name2, context, regenerate=True, mode=mode, end_of_turn=end_of_turn):
             shared.history['visible'][-1] = [last_visible[0], history[-1][1]]
             yield chat_html_wrapper(shared.history['visible'], name1, name2, mode)
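
Reviewer note: after this change every wrapper in this file takes its settings from the renamed generate_params dict. A minimal caller sketch, assuming the module path modules.chat and illustrative values; the key names come from the keys this diff actually reads (plus the sampling keys consumed further down in generate_reply), but the values and the call site are assumptions, not part of the commit:

    # Hypothetical caller; key names are taken from the diff, values are illustrative.
    from modules.chat import chatbot_wrapper

    generate_params = {
        'stop_at_newline': False,          # read by chatbot_wrapper / impersonate_wrapper
        'max_new_tokens': 200,
        'chat_prompt_size': 2048,
        'chat_generation_attempts': 1,
        'seed': -1,                        # read by generate_reply
        'do_sample': True,
        'temperature': 0.7,
        'top_p': 0.9,
        'top_k': 40,
        'typical_p': 1.0,
        'repetition_penalty': 1.18,
        'encoder_repetition_penalty': 1.0,
        'min_length': 0,
        'no_repeat_ngram_size': 0,
        'num_beams': 1,
        'penalty_alpha': 0.0,
        'length_penalty': 1.0,
        'early_stopping': False,
        'eos_token_ids': [0],              # only read on the non-Transformers path
    }

    # The wrappers are generators: each iteration yields the updated chat history.
    for history in chatbot_wrapper("Hello!", generate_params, 'You', 'Assistant', '',
                                   regenerate=False, mode="cai-chat", end_of_turn=""):
        pass  # e.g. push each intermediate history to the UI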

modules/text_generation.py

@@ -102,10 +102,10 @@ def set_manual_seed(seed):
 def stop_everything_event():
     shared.stop_everything = True
 
-def generate_reply(question, generation_params, eos_token=None, stopping_strings=[]):
-    max_new_tokens = generation_params['max_new_tokens']
-    seed = generation_params['seed']
-    print(generation_params)
+def generate_reply(question, generate_params, eos_token=None, stopping_strings=[]):
+    max_new_tokens = generate_params['max_new_tokens']
+    seed = generate_params['seed']
+    print(generate_params)
     print('---------------')
 
     clear_torch_cache()
     set_manual_seed(seed)
@@ -124,8 +124,8 @@ def generate_reply(question, generation_params, eos_token=None, stopping_strings
     if any((shared.is_RWKV, shared.is_llamacpp)):
         for k in ['temperature', 'top_p', 'top_k', 'repetition_penalty']:
-            updated_params[k] = generation_params[k]
-        updated_params["token_count"] = generation_params["max_new_tokens"]
+            updated_params[k] = generate_params[k]
+        updated_params["token_count"] = generate_params["max_new_tokens"]
 
         try:
             if shared.args.no_stream:
@@ -173,14 +173,14 @@ def generate_reply(question, generation_params, eos_token=None, stopping_strings
updated_params["eos_token_id"] = eos_token_ids
updated_params["stopping_criteria"] = stopping_criteria_list
for k in ["do_sample", "temperature", "top_p", "typical_p", "repetition_penalty", "encoder_repetition_penalty", "top_k", "min_length", "no_repeat_ngram_size", "num_beams", "penalty_alpha", "length_penalty", "early_stopping"]:
updated_params[k] = generation_params[k]
updated_params[k] = generate_params[k]
if shared.args.no_stream:
updated_params["min_length"] = 0
else:
for k in ["do_sample", "temperature"]:
updated_params[k] = generation_params[k]
updated_params["stop"] = generation_params["eos_token_ids"][-1]
updated_params[k] = generate_params[k]
updated_params["stop"] = generate_params["eos_token_ids"][-1]
if not shared.args.no_stream:
updated_params["max_new_tokens"] = 8
print(updated_params)
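
Reviewer note: generate_reply keeps the same streaming contract under the new name, which is why the wrappers in chat.py iterate over it. A direct-use sketch, reusing the generate_params dict from the sketch after chat.py; the module path and the exact per-yield semantics are assumptions:

    # Hypothetical direct call; stopping_strings mirrors the calls made from chat.py.
    from modules.text_generation import generate_reply

    for reply in generate_reply("Write a haiku about spring.", generate_params,
                                eos_token=None,
                                stopping_strings=["\nYou:", "\nAssistant:"]):
        print(reply)  # each yield is assumed to be the reply generated so far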