# chat.py
  1. import base64
  2. import copy
  3. import io
  4. import json
  5. import re
  6. from datetime import datetime
  7. from pathlib import Path
  8. import yaml
  9. from PIL import Image
  10. import modules.extensions as extensions_module
  11. import modules.shared as shared
  12. from modules.extensions import apply_extensions
  13. from modules.html_generator import (chat_html_wrapper, fix_newlines,
  14. make_thumbnail)
  15. from modules.text_generation import (encode, generate_reply,
  16. get_max_prompt_length)
  17. def generate_chat_prompt(user_input, state, **kwargs):
  18. impersonate = kwargs['impersonate'] if 'impersonate' in kwargs else False
  19. _continue = kwargs['_continue'] if '_continue' in kwargs else False
  20. also_return_rows = kwargs['also_return_rows'] if 'also_return_rows' in kwargs else False
  21. is_instruct = state['mode'] == 'instruct'
  22. rows = [f"{state['context'].strip()}\n"]
  23. # Finding the maximum prompt size
  24. chat_prompt_size = state['chat_prompt_size']
  25. if shared.soft_prompt:
  26. chat_prompt_size -= shared.soft_prompt_tensor.shape[1]
  27. max_length = min(get_max_prompt_length(state), chat_prompt_size)
  28. if is_instruct:
  29. prefix1 = f"{state['name1']}\n"
  30. prefix2 = f"{state['name2']}\n"
  31. else:
  32. prefix1 = f"{state['name1']}: "
  33. prefix2 = f"{state['name2']}: "
  34. i = len(shared.history['internal']) - 1
  35. while i >= 0 and len(encode(''.join(rows))[0]) < max_length:
  36. if _continue and i == len(shared.history['internal']) - 1:
  37. rows.insert(1, f"{prefix2}{shared.history['internal'][i][1]}")
  38. else:
  39. rows.insert(1, f"{prefix2}{shared.history['internal'][i][1].strip()}{state['end_of_turn']}\n")
  40. string = shared.history['internal'][i][0]
  41. if string not in ['', '<|BEGIN-VISIBLE-CHAT|>']:
  42. rows.insert(1, f"{prefix1}{string.strip()}{state['end_of_turn']}\n")
  43. i -= 1
  44. if impersonate:
  45. rows.append(f"{prefix1.strip() if not is_instruct else prefix1}")
  46. limit = 2
  47. elif _continue:
  48. limit = 3
  49. else:
  50. # Adding the user message
  51. user_input = fix_newlines(user_input)
  52. if len(user_input) > 0:
  53. rows.append(f"{prefix1}{user_input}{state['end_of_turn']}\n")
  54. # Adding the Character prefix
  55. rows.append(apply_extensions(f"{prefix2.strip() if not is_instruct else prefix2}", "bot_prefix"))
  56. limit = 3
  57. while len(rows) > limit and len(encode(''.join(rows))[0]) >= max_length:
  58. rows.pop(1)
  59. prompt = ''.join(rows)
  60. if also_return_rows:
  61. return prompt, rows
  62. else:
  63. return prompt
  64. def get_stopping_strings(state):
  65. if state['mode'] == 'instruct':
  66. stopping_strings = [f"\n{state['name1']}", f"\n{state['name2']}"]
  67. else:
  68. stopping_strings = [f"\n{state['name1']}:", f"\n{state['name2']}:"]
  69. stopping_strings += state['custom_stopping_strings']
  70. return stopping_strings
  71. def extract_message_from_reply(reply, state):
  72. next_character_found = False
  73. stopping_strings = get_stopping_strings(state)
  74. if state['stop_at_newline']:
  75. lines = reply.split('\n')
  76. reply = lines[0].strip()
  77. if len(lines) > 1:
  78. next_character_found = True
  79. else:
  80. for string in stopping_strings:
  81. idx = reply.find(string)
  82. if idx != -1:
  83. reply = reply[:idx]
  84. next_character_found = True
  85. # If something like "\nYo" is generated just before "\nYou:"
  86. # is completed, trim it
  87. if not next_character_found:
  88. for string in stopping_strings:
  89. for j in range(len(string) - 1, 0, -1):
  90. if reply[-j:] == string[:j]:
  91. reply = reply[:-j]
  92. break
  93. else:
  94. continue
  95. break
  96. reply = fix_newlines(reply)
  97. return reply, next_character_found
  98. def chatbot_wrapper(text, state, regenerate=False, _continue=False):
  99. # Defining some variables
  100. cumulative_reply = ''
  101. last_reply = [shared.history['internal'][-1][1], shared.history['visible'][-1][1]] if _continue else None
  102. just_started = True
  103. visible_text = custom_generate_chat_prompt = None
  104. eos_token = '\n' if state['stop_at_newline'] else None
  105. stopping_strings = get_stopping_strings(state)
  106. # Check if any extension wants to hijack this function call
  107. for extension, _ in extensions_module.iterator():
  108. if hasattr(extension, 'input_hijack') and extension.input_hijack['state']:
  109. extension.input_hijack['state'] = False
  110. text, visible_text = extension.input_hijack['value']
  111. if custom_generate_chat_prompt is None and hasattr(extension, 'custom_generate_chat_prompt'):
  112. custom_generate_chat_prompt = extension.custom_generate_chat_prompt
  113. if visible_text is None:
  114. visible_text = text
  115. if not _continue:
  116. text = apply_extensions(text, "input")
  117. # Generating the prompt
  118. kwargs = {'_continue': _continue}
  119. if custom_generate_chat_prompt is None:
  120. prompt = generate_chat_prompt(text, state, **kwargs)
  121. else:
  122. prompt = custom_generate_chat_prompt(text, state, **kwargs)
  123. # Yield *Is typing...*
  124. if not any((regenerate, _continue)):
  125. yield shared.history['visible'] + [[visible_text, shared.processing_message]]
  126. # Generate
  127. for i in range(state['chat_generation_attempts']):
  128. reply = None
  129. for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", state, eos_token=eos_token, stopping_strings=stopping_strings):
  130. reply = cumulative_reply + reply
  131. # Extracting the reply
  132. reply, next_character_found = extract_message_from_reply(reply, state)
  133. visible_reply = re.sub("(<USER>|<user>|{{user}})", state['name1'], reply)
  134. visible_reply = apply_extensions(visible_reply, "output")
  135. # We need this global variable to handle the Stop event,
  136. # otherwise gradio gets confused
  137. if shared.stop_everything:
  138. return shared.history['visible']
  139. if just_started:
  140. just_started = False
  141. if not _continue:
  142. shared.history['internal'].append(['', ''])
  143. shared.history['visible'].append(['', ''])
  144. if _continue:
  145. sep = list(map(lambda x: ' ' if len(x) > 0 and x[-1] != ' ' else '', last_reply))
  146. shared.history['internal'][-1] = [text, f'{last_reply[0]}{sep[0]}{reply}']
  147. shared.history['visible'][-1] = [visible_text, f'{last_reply[1]}{sep[1]}{visible_reply}']
  148. else:
  149. shared.history['internal'][-1] = [text, reply]
  150. shared.history['visible'][-1] = [visible_text, visible_reply]
  151. if not shared.args.no_stream:
  152. yield shared.history['visible']
  153. if next_character_found:
  154. break
  155. if reply is not None:
  156. cumulative_reply = reply
  157. yield shared.history['visible']
  158. def impersonate_wrapper(text, state):
  159. # Defining some variables
  160. cumulative_reply = ''
  161. eos_token = '\n' if state['stop_at_newline'] else None
  162. prompt = generate_chat_prompt(text, state, impersonate=True)
  163. stopping_strings = get_stopping_strings(state)
  164. # Yield *Is typing...*
  165. yield shared.processing_message
  166. for i in range(state['chat_generation_attempts']):
  167. reply = None
  168. for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", state, eos_token=eos_token, stopping_strings=stopping_strings):
  169. reply = cumulative_reply + reply
  170. reply, next_character_found = extract_message_from_reply(reply, state)
  171. yield reply
  172. if next_character_found:
  173. break
  174. if reply is not None:
  175. cumulative_reply = reply
  176. yield reply
  177. def cai_chatbot_wrapper(text, state):
  178. for history in chatbot_wrapper(text, state):
  179. yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'])
  180. def regenerate_wrapper(text, state):
  181. if (len(shared.history['visible']) == 1 and not shared.history['visible'][0][0]) or len(shared.history['internal']) == 0:
  182. yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode'])
  183. else:
  184. last_visible = shared.history['visible'].pop()
  185. last_internal = shared.history['internal'].pop()
  186. # Yield '*Is typing...*'
  187. yield chat_html_wrapper(shared.history['visible'] + [[last_visible[0], shared.processing_message]], state['name1'], state['name2'], state['mode'])
  188. for history in chatbot_wrapper(last_internal[0], state, regenerate=True):
  189. shared.history['visible'][-1] = [last_visible[0], history[-1][1]]
  190. yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode'])
  191. def continue_wrapper(text, state):
  192. if (len(shared.history['visible']) == 1 and not shared.history['visible'][0][0]) or len(shared.history['internal']) == 0:
  193. yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode'])
  194. else:
  195. # Yield ' ...'
  196. yield chat_html_wrapper(shared.history['visible'][:-1] + [[shared.history['visible'][-1][0], shared.history['visible'][-1][1] + ' ...']], state['name1'], state['name2'], state['mode'])
  197. for history in chatbot_wrapper(shared.history['internal'][-1][0], state, _continue=True):
  198. yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode'])
  199. def remove_last_message(name1, name2, mode):
  200. if len(shared.history['visible']) > 0 and shared.history['internal'][-1][0] != '<|BEGIN-VISIBLE-CHAT|>':
  201. last = shared.history['visible'].pop()
  202. shared.history['internal'].pop()
  203. else:
  204. last = ['', '']
  205. return chat_html_wrapper(shared.history['visible'], name1, name2, mode), last[0]
  206. def send_last_reply_to_input():
  207. if len(shared.history['internal']) > 0:
  208. return shared.history['internal'][-1][1]
  209. else:
  210. return ''
  211. def replace_last_reply(text, name1, name2, mode):
  212. if len(shared.history['visible']) > 0:
  213. shared.history['visible'][-1][1] = text
  214. shared.history['internal'][-1][1] = apply_extensions(text, "input")
  215. return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
  216. def send_dummy_message(text, name1, name2, mode):
  217. shared.history['visible'].append([text, ''])
  218. shared.history['internal'].append([apply_extensions(text, "input"), ''])
  219. return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
  220. def send_dummy_reply(text, name1, name2, mode):
  221. if len(shared.history['visible']) > 0 and not shared.history['visible'][-1][1] == '':
  222. shared.history['visible'].append(['', ''])
  223. shared.history['internal'].append(['', ''])
  224. shared.history['visible'][-1][1] = text
  225. shared.history['internal'][-1][1] = apply_extensions(text, "input")
  226. return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
  227. def clear_html():
  228. return chat_html_wrapper([], "", "")
  229. def clear_chat_log(name1, name2, greeting, mode):
  230. shared.history['visible'] = []
  231. shared.history['internal'] = []
  232. if greeting != '':
  233. shared.history['internal'] += [['<|BEGIN-VISIBLE-CHAT|>', greeting]]
  234. shared.history['visible'] += [['', apply_extensions(greeting, "output")]]
  235. # Save cleared logs
  236. save_history(mode)
  237. return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
  238. def redraw_html(name1, name2, mode):
  239. return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
  240. def tokenize_dialogue(dialogue, name1, name2, mode):
  241. history = []
  242. messages = []
  243. dialogue = re.sub('<START>', '', dialogue)
  244. dialogue = re.sub('<start>', '', dialogue)
  245. dialogue = re.sub('(\n|^)[Aa]non:', '\\1You:', dialogue)
  246. dialogue = re.sub('(\n|^)\[CHARACTER\]:', f'\\g<1>{name2}:', dialogue)
  247. idx = [m.start() for m in re.finditer(f"(^|\n)({re.escape(name1)}|{re.escape(name2)}):", dialogue)]
  248. if len(idx) == 0:
  249. return history
  250. for i in range(len(idx) - 1):
  251. messages.append(dialogue[idx[i]:idx[i + 1]].strip())
  252. messages.append(dialogue[idx[-1]:].strip())
  253. entry = ['', '']
  254. for i in messages:
  255. if i.startswith(f'{name1}:'):
  256. entry[0] = i[len(f'{name1}:'):].strip()
  257. elif i.startswith(f'{name2}:'):
  258. entry[1] = i[len(f'{name2}:'):].strip()
  259. if not (len(entry[0]) == 0 and len(entry[1]) == 0):
  260. history.append(entry)
  261. entry = ['', '']
  262. print("\033[1;32;1m\nDialogue tokenized to:\033[0;37;0m\n", end='')
  263. for row in history:
  264. for column in row:
  265. print("\n")
  266. for line in column.strip().split('\n'):
  267. print("| " + line + "\n")
  268. print("|\n")
  269. print("------------------------------")
  270. return history
  271. def save_history(mode, timestamp=False):
  272. # Instruct mode histories should not be saved as if
  273. # Alpaca or Vicuna were characters
  274. if mode == 'instruct':
  275. if not timestamp:
  276. return
  277. fname = f"Instruct_{datetime.now().strftime('%Y%m%d-%H%M%S')}.json"
  278. else:
  279. if timestamp:
  280. fname = f"{shared.character}_{datetime.now().strftime('%Y%m%d-%H%M%S')}.json"
  281. else:
  282. fname = f"{shared.character}_persistent.json"
  283. if not Path('logs').exists():
  284. Path('logs').mkdir()
  285. with open(Path(f'logs/{fname}'), 'w', encoding='utf-8') as f:
  286. f.write(json.dumps({'data': shared.history['internal'], 'data_visible': shared.history['visible']}, indent=2))
  287. return Path(f'logs/{fname}')
  288. def load_history(file, name1, name2):
  289. file = file.decode('utf-8')
  290. try:
  291. j = json.loads(file)
  292. if 'data' in j:
  293. shared.history['internal'] = j['data']
  294. if 'data_visible' in j:
  295. shared.history['visible'] = j['data_visible']
  296. else:
  297. shared.history['visible'] = copy.deepcopy(shared.history['internal'])
  298. except:
  299. shared.history['internal'] = tokenize_dialogue(file, name1, name2)
  300. shared.history['visible'] = copy.deepcopy(shared.history['internal'])
  301. def replace_character_names(text, name1, name2):
  302. text = text.replace('{{user}}', name1).replace('{{char}}', name2)
  303. return text.replace('<USER>', name1).replace('<BOT>', name2)
  304. def build_pygmalion_style_context(data):
  305. context = ""
  306. if 'char_persona' in data and data['char_persona'] != '':
  307. context += f"{data['char_name']}'s Persona: {data['char_persona']}\n"
  308. if 'world_scenario' in data and data['world_scenario'] != '':
  309. context += f"Scenario: {data['world_scenario']}\n"
  310. context = f"{context.strip()}\n<START>\n"
  311. return context
  312. def generate_pfp_cache(character):
  313. cache_folder = Path("cache")
  314. if not cache_folder.exists():
  315. cache_folder.mkdir()
  316. for path in [Path(f"characters/{character}.{extension}") for extension in ['png', 'jpg', 'jpeg']]:
  317. if path.exists():
  318. img = make_thumbnail(Image.open(path))
  319. img.save(Path('cache/pfp_character.png'), format='PNG')
  320. return img
  321. return None
  322. def load_character(character, name1, name2, mode):
  323. shared.character = character
  324. context = greeting = end_of_turn = ""
  325. greeting_field = 'greeting'
  326. picture = None
  327. # Deleting the profile picture cache, if any
  328. if Path("cache/pfp_character.png").exists():
  329. Path("cache/pfp_character.png").unlink()
  330. if character != 'None':
  331. folder = 'characters' if not mode == 'instruct' else 'characters/instruction-following'
  332. picture = generate_pfp_cache(character)
  333. for extension in ["yml", "yaml", "json"]:
  334. filepath = Path(f'{folder}/{character}.{extension}')
  335. if filepath.exists():
  336. break
  337. file_contents = open(filepath, 'r', encoding='utf-8').read()
  338. data = json.loads(file_contents) if extension == "json" else yaml.safe_load(file_contents)
  339. if 'your_name' in data and data['your_name'] != '':
  340. name1 = data['your_name']
  341. name2 = data['name'] if 'name' in data else data['char_name']
  342. for field in ['context', 'greeting', 'example_dialogue', 'char_persona', 'char_greeting', 'world_scenario']:
  343. if field in data:
  344. data[field] = replace_character_names(data[field], name1, name2)
  345. if 'context' in data:
  346. context = f"{data['context'].strip()}\n\n"
  347. elif "char_persona" in data:
  348. context = build_pygmalion_style_context(data)
  349. greeting_field = 'char_greeting'
  350. if 'example_dialogue' in data:
  351. context += f"{data['example_dialogue'].strip()}\n"
  352. if greeting_field in data:
  353. greeting = data[greeting_field]
  354. if 'end_of_turn' in data:
  355. end_of_turn = data['end_of_turn']
  356. else:
  357. context = shared.settings['context']
  358. name2 = shared.settings['name2']
  359. greeting = shared.settings['greeting']
  360. end_of_turn = shared.settings['end_of_turn']
  361. if mode != 'instruct':
  362. shared.history['internal'] = []
  363. shared.history['visible'] = []
  364. if Path(f'logs/{shared.character}_persistent.json').exists():
  365. load_history(open(Path(f'logs/{shared.character}_persistent.json'), 'rb').read(), name1, name2)
  366. else:
  367. # Insert greeting if it exists
  368. if greeting != "":
  369. shared.history['internal'] += [['<|BEGIN-VISIBLE-CHAT|>', greeting]]
  370. shared.history['visible'] += [['', apply_extensions(greeting, "output")]]
  371. # Create .json log files since they don't already exist
  372. save_history(mode)
  373. return name1, name2, picture, greeting, context, end_of_turn, chat_html_wrapper(shared.history['visible'], name1, name2, mode)
  374. def load_default_history(name1, name2):
  375. load_character("None", name1, name2, "chat")
  376. def upload_character(json_file, img, tavern=False):
  377. json_file = json_file if type(json_file) == str else json_file.decode('utf-8')
  378. data = json.loads(json_file)
  379. outfile_name = data["char_name"]
  380. i = 1
  381. while Path(f'characters/{outfile_name}.json').exists():
  382. outfile_name = f'{data["char_name"]}_{i:03d}'
  383. i += 1
  384. if tavern:
  385. outfile_name = f'TavernAI-{outfile_name}'
  386. with open(Path(f'characters/{outfile_name}.json'), 'w', encoding='utf-8') as f:
  387. f.write(json_file)
  388. if img is not None:
  389. img = Image.open(io.BytesIO(img))
  390. img.save(Path(f'characters/{outfile_name}.png'))
  391. print(f'New character saved to "characters/{outfile_name}.json".')
  392. return outfile_name
  393. def upload_tavern_character(img, name1, name2):
  394. _img = Image.open(io.BytesIO(img))
  395. _img.getexif()
  396. decoded_string = base64.b64decode(_img.info['chara'])
  397. _json = json.loads(decoded_string)
  398. _json = {"char_name": _json['name'], "char_persona": _json['description'], "char_greeting": _json["first_mes"], "example_dialogue": _json['mes_example'], "world_scenario": _json['scenario']}
  399. return upload_character(json.dumps(_json), img, tavern=True)
  400. def upload_your_profile_picture(img, name1, name2, mode):
  401. cache_folder = Path("cache")
  402. if not cache_folder.exists():
  403. cache_folder.mkdir()
  404. if img is None:
  405. if Path("cache/pfp_me.png").exists():
  406. Path("cache/pfp_me.png").unlink()
  407. else:
  408. img = make_thumbnail(img)
  409. img.save(Path('cache/pfp_me.png'))
  410. print('Profile picture saved to "cache/pfp_me.png"')
  411. return chat_html_wrapper(shared.history['visible'], name1, name2, mode, reset_cache=True)