@@ -24,16 +24,16 @@ def clean_chat_message(text):
     text = text.strip()
     return text
 
-def generate_chat_prompt(user_input, tokens, name1, name2, context, chat_prompt_size, impersonate=False):
+def generate_chat_prompt(user_input, max_new_tokens, name1, name2, context, chat_prompt_size, impersonate=False):
     user_input = clean_chat_message(user_input)
     rows = [f"{context.strip()}\n"]
 
     if shared.soft_prompt:
         chat_prompt_size -= shared.soft_prompt_tensor.shape[1]
-    max_length = min(get_max_prompt_length(tokens), chat_prompt_size)
+    max_length = min(get_max_prompt_length(max_new_tokens), chat_prompt_size)
 
     i = len(shared.history['internal'])-1
-    while i >= 0 and len(encode(''.join(rows), tokens)[0]) < max_length:
+    while i >= 0 and len(encode(''.join(rows), max_new_tokens)[0]) < max_length:
         rows.insert(1, f"{name2}: {shared.history['internal'][i][1].strip()}\n")
         if not (shared.history['internal'][i][0] == '<|BEGIN-VISIBLE-CHAT|>'):
             rows.insert(1, f"{name1}: {shared.history['internal'][i][0].strip()}\n")
@@ -47,7 +47,7 @@ def generate_chat_prompt(user_input, tokens, name1, name2, context, chat_prompt_
         rows.append(f"{name1}:")
         limit = 2
 
-    while len(rows) > limit and len(encode(''.join(rows), tokens)[0]) >= max_length:
+    while len(rows) > limit and len(encode(''.join(rows), max_new_tokens)[0]) >= max_length:
         rows.pop(1)
 
     prompt = ''.join(rows)
@@ -95,7 +95,7 @@ def generate_chat_picture(picture, name1, name2):
 def stop_everything_event():
     shared.stop_everything = True
 
-def chatbot_wrapper(text, tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, name1, name2, context, check, chat_prompt_size, picture=None):
+def chatbot_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, name1, name2, context, check, chat_prompt_size, picture=None):
     shared.stop_everything = False
     just_started = True
     eos_token = '\n' if check else None
@@ -110,10 +110,10 @@ def chatbot_wrapper(text, tokens, do_sample, max_new_tokens, temperature, top_p,
     if shared.args.chat:
         visible_text = visible_text.replace('\n', '<br>')
     text = apply_extensions(text, "input")
-    prompt = generate_chat_prompt(text, tokens, name1, name2, context, chat_prompt_size)
+    prompt = generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size)
 
     # Generate
-    for reply in generate_reply(prompt, tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, eos_token=eos_token, stopping_string=f"\n{name1}:"):
+    for reply in generate_reply(prompt, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, eos_token=eos_token, stopping_string=f"\n{name1}:"):
 
         # Extracting the reply
         reply, next_character_found, substring_found = extract_message_from_reply(prompt, reply, name2, name1, check, extensions=True)
@@ -138,15 +138,15 @@ def chatbot_wrapper(text, tokens, do_sample, max_new_tokens, temperature, top_p,
             break
     yield shared.history['visible']
 
-def impersonate_wrapper(text, tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, name1, name2, context, check, chat_prompt_size, picture=None):
+def impersonate_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, name1, name2, context, check, chat_prompt_size, picture=None):
     eos_token = '\n' if check else None
 
     if 'pygmalion' in shared.model_name.lower():
         name1 = "You"
 
-    prompt = generate_chat_prompt(text, tokens, name1, name2, context, chat_prompt_size, impersonate=True)
+    prompt = generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size, impersonate=True)
 
-    for reply in generate_reply(prompt, tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, eos_token=eos_token, stopping_string=f"\n{name2}:"):
+    for reply in generate_reply(prompt, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, eos_token=eos_token, stopping_string=f"\n{name2}:"):
         reply, next_character_found, substring_found = extract_message_from_reply(prompt, reply, name1, name2, check, extensions=False)
         if not substring_found:
             yield reply
@@ -154,11 +154,11 @@ def impersonate_wrapper(text, tokens, do_sample, max_new_tokens, temperature, to
             break
     yield reply
 
-def cai_chatbot_wrapper(text, tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, name1, name2, context, check, chat_prompt_size, picture=None):
-    for _history in chatbot_wrapper(text, tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, name1, name2, context, check, chat_prompt_size, picture):
+def cai_chatbot_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, name1, name2, context, check, chat_prompt_size, picture=None):
+    for _history in chatbot_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, name1, name2, context, check, chat_prompt_size, picture):
         yield generate_chat_html(_history, name1, name2, shared.character)
 
-def regenerate_wrapper(text, tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, name1, name2, context, check, chat_prompt_size, picture=None):
+def regenerate_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, name1, name2, context, check, chat_prompt_size, picture=None):
     if shared.character != 'None' and len(shared.history['visible']) == 1:
         if shared.args.cai_chat:
             yield generate_chat_html(shared.history['visible'], name1, name2, shared.character)
@@ -168,7 +168,7 @@ def regenerate_wrapper(text, tokens, do_sample, max_new_tokens, temperature, top
         last_visible = shared.history['visible'].pop()
         last_internal = shared.history['internal'].pop()
 
-        for _history in chatbot_wrapper(last_internal[0], tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, name1, name2, context, check, chat_prompt_size, picture):
+        for _history in chatbot_wrapper(last_internal[0], max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, name1, name2, context, check, chat_prompt_size, picture):
             if shared.args.cai_chat:
                 shared.history['visible'][-1] = [last_visible[0], _history[-1][1]]
             yield generate_chat_html(shared.history['visible'], name1, name2, shared.character)
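
The parameter this patch renames is the generation budget: `generate_chat_prompt` subtracts it from the model's context window (via `get_max_prompt_length`) and drops the oldest history rows until the prompt fits. A minimal standalone sketch of that budgeting logic, not the repository's actual implementation; it assumes a hypothetical 2048-token context window and whitespace splitting as a stand-in for the real `encode`:

    # Sketch only: MODEL_CTX and the whitespace tokenizer are assumptions,
    # standing in for the model config and the real encode() helper.
    MODEL_CTX = 2048  # hypothetical context window

    def get_max_prompt_length(max_new_tokens):
        # Room left for the prompt after reserving space for generation.
        return MODEL_CTX - max_new_tokens

    def encode(text):
        # Crude stand-in for the real tokenizer.
        return text.split()

    def trim_history(rows, max_new_tokens, chat_prompt_size):
        # Keep the character context (index 0) and pop the oldest
        # exchanges (index 1) until the prompt fits the budget.
        max_length = min(get_max_prompt_length(max_new_tokens), chat_prompt_size)
        while len(rows) > 1 and len(encode(''.join(rows))) >= max_length:
            rows.pop(1)
        return rows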