@@ -115,18 +115,14 @@ def chatbot_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical
     visible_text = visible_text.replace('\n', '<br>')
     text = apply_extensions(text, "input")
 
+    if custom_generate_chat_prompt is None:
+        prompt = generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size)
+    else:
+        prompt = custom_generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size)
+
     # Generate
     reply = ''
     for i in range(chat_generation_attempts):
-
-        # The prompt needs to be generated here because, as the reply
-        # grows, it may become necessary to remove more old messages to
-        # fit into the 2048 tokens window.
-        if custom_generate_chat_prompt is None:
-            prompt = generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size-len(encode(' '+reply)[0]))
-        else:
-            prompt = custom_generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size-len(encode(' '+reply)[0]))
-
         for reply in generate_reply(f"{prompt}{' ' if len(reply) > 0 else ''}{reply}", max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, eos_token=eos_token, stopping_string=f"\n{name1}:"):
 
             # Extracting the reply
@@ -160,10 +156,10 @@ def impersonate_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typ
     if 'pygmalion' in shared.model_name.lower():
         name1 = "You"
 
+    prompt = generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size, impersonate=True)
     reply = ''
     for i in range(chat_generation_attempts):
-        prompt = generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size-len(encode(' '+reply)[0]), impersonate=True)
         for reply in generate_reply(prompt+reply, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, eos_token=eos_token, stopping_string=f"\n{name2}:"):
             reply, next_character_found, substring_found = extract_message_from_reply(prompt, reply, name1, name2, check, impersonate=True)
             if not substring_found:
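
For reference, below is a minimal, self-contained sketch of the pattern this diff changes. The helpers are simplified stand-ins, not the real encode() and generate_chat_prompt() from the webui modules (the real prompt builder also takes name1, name2, and context and reads the shared history); only the budget arithmetic mirrors the lines above.

def encode(text):
    # Stand-in tokenizer: returns a batch containing one token list, so
    # callers index [0], mirroring the shape of the real encode().
    return [text.split()]

def generate_chat_prompt(text, max_new_tokens, history, chat_prompt_size):
    # Greedily prepend recent history while the encoded prompt fits the
    # token budget; the oldest messages are the first to be dropped.
    budget = min(2048 - max_new_tokens, chat_prompt_size)
    rows = [text]
    for message in reversed(history):
        candidate = [message] + rows
        if len(encode('\n'.join(candidate))[0]) > budget:
            break
        rows = candidate
    return '\n'.join(rows)

history = ['Bot: Hello!', 'You: Hi.', 'Bot: How can I help?']

# After this diff: the prompt is built once, before the retry loop.
prompt = generate_chat_prompt('You: Tell me a joke.', 200, history, 2048)

# Before this diff: it was rebuilt on every attempt, with the budget
# shrunk by the tokens of the reply generated so far, so that old
# messages kept being trimmed as the reply grew.
reply = 'Why did the chicken cross the road?'
prompt = generate_chat_prompt('You: Tell me a joke.', 200, history,
                              2048 - len(encode(' ' + reply)[0]))

The net effect is that history trimming now happens once per user message rather than once per generation attempt, giving up the per-attempt budget shrinking that the removed comment described.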