import re
import time
import glob
from sys import exit
import torch
import argparse
import json
from pathlib import Path
import gradio as gr
import transformers
from html_generator import *
from transformers import AutoTokenizer, AutoModelForCausalLM
import warnings
import gc
from tqdm import tqdm

transformers.logging.set_verbosity_error()

parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, help='Name of the model to load by default.')
parser.add_argument('--notebook', action='store_true', help='Launch the web UI in notebook mode, where the output is written to the same text box as the input.')
parser.add_argument('--chat', action='store_true', help='Launch the web UI in chat mode.')
parser.add_argument('--cai-chat', action='store_true', help='Launch the web UI in chat mode with a style similar to Character.AI\'s. If the file profile.png or profile.jpg exists in the same folder as server.py, this image will be used as the bot\'s profile picture.')
parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text.')
parser.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision.')
parser.add_argument('--auto-devices', action='store_true', help='Automatically split the model across the available GPU(s) and CPU.')
parser.add_argument('--disk', action='store_true', help='If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.')
parser.add_argument('--max-gpu-memory', type=int, help='Maximum memory in GiB to allocate to the GPU when loading the model. This is useful if you get out of memory errors while trying to generate text. Must be an integer number.')
parser.add_argument('--no-listen', action='store_true', help='Make the web UI unreachable from your local network.')
parser.add_argument('--no-stream', action='store_true', help='Don\'t stream the text output in real time. This slightly improves the text generation performance.')
parser.add_argument('--settings', type=str, help='Load the default interface settings from this json file. See settings-template.json for an example.')
args = parser.parse_args()
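
# Available models, presets, and characters are discovered by listing the
# models/, torch-dumps/, presets/, and characters/ folders (extensions stripped).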
loaded_preset = None
available_models = sorted(set([item.replace('.pt', '') for item in map(lambda x: str(x.name), list(Path('models/').glob('*'))+list(Path('torch-dumps/').glob('*'))) if not item.endswith('.txt')]), key=str.lower)
available_presets = sorted(set(map(lambda x: str(x.name).split('.')[0], Path('presets').glob('*.txt'))), key=str.lower)
available_characters = sorted(set(map(lambda x: str(x.name).split('.')[0], Path('characters').glob('*.json'))), key=str.lower)

settings = {
    'max_new_tokens': 200,
    'max_new_tokens_min': 1,
    'max_new_tokens_max': 2000,
    'preset': 'NovelAI-Sphinx Moth',
    'name1': 'Person 1',
    'name2': 'Person 2',
    'name1_pygmalion': 'You',
    'name2_pygmalion': 'Kawaii',
    'context': 'This is a conversation between two people.',
    'context_pygmalion': 'This is a conversation between two people.\n<START>',
    'prompt': 'Common sense questions and answers\n\nQuestion: \nFactual answer:',
    'prompt_gpt4chan': '-----\n--- 865467536\nInput text\n--- 865467537\n',
    'stop_at_newline': True,
    'stop_at_newline_pygmalion': False,
}
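
# Override the defaults above with values from the --settings JSON file,
# keeping only keys that are already defined.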
if args.settings is not None and Path(args.settings).exists():
    with open(Path(args.settings), 'r') as f:
        new_settings = json.load(f)
        for item in new_settings:
            if item in settings:
                settings[item] = new_settings[item]

def load_model(model_name):
    print(f"Loading {model_name}...")
    t0 = time.time()

    # Default settings
    if not (args.cpu or args.load_in_8bit or args.auto_devices or args.disk or args.max_gpu_memory is not None):
        if Path(f"torch-dumps/{model_name}.pt").exists():
            print("Loading in .pt format...")
            model = torch.load(Path(f"torch-dumps/{model_name}.pt"))
        elif model_name.lower().startswith(('gpt-neo', 'opt-', 'galactica')) and any(size in model_name.lower() for size in ('13b', '20b', '30b')):
            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), device_map='auto', load_in_8bit=True)
        else:
            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.float16).cuda()

    # Custom settings: build the from_pretrained() call as a string so that only
    # the keyword arguments implied by the command-line flags are included, then
    # execute it with eval()
    else:
        settings = ["low_cpu_mem_usage=True"]
        command = "AutoModelForCausalLM.from_pretrained"

        if args.cpu:
            settings.append("torch_dtype=torch.float32")
        else:
            settings.append("device_map='auto'")
            if args.max_gpu_memory is not None:
                settings.append(f"max_memory={{0: '{args.max_gpu_memory}GiB', 'cpu': '99GiB'}}")
            if args.disk:
                settings.append("offload_folder='cache'")
            if args.load_in_8bit:
                settings.append("load_in_8bit=True")
            else:
                settings.append("torch_dtype=torch.float16")

        settings = ', '.join(set(settings))
        command = f"{command}(Path(f'models/{model_name}'), {settings})"
        model = eval(command)

    # Loading the tokenizer
    if model_name.lower().startswith(('gpt4chan', 'gpt-4chan', '4chan')) and Path("models/gpt-j-6B/").exists():
        tokenizer = AutoTokenizer.from_pretrained(Path("models/gpt-j-6B/"))
    else:
        tokenizer = AutoTokenizer.from_pretrained(Path(f"models/{model_name}/"))
    tokenizer.truncation_side = 'left'

    print(f"Loaded the model in {(time.time()-t0):.2f} seconds.")
    return model, tokenizer

# Removes empty replies from gpt4chan outputs
def fix_gpt4chan(s):
    for i in range(10):
        s = re.sub("--- [0-9]*\n>>[0-9]*\n---", "---", s)
        s = re.sub("--- [0-9]*\n *\n---", "---", s)
        s = re.sub("--- [0-9]*\n\n\n---", "---", s)
    return s

# Fix the LaTeX equation delimiters in GALACTICA outputs
def fix_galactica(s):
    s = s.replace(r'\[', r'$')
    s = s.replace(r'\]', r'$')
    s = s.replace(r'\(', r'$')
    s = s.replace(r'\)', r'$')
    s = s.replace(r'$$', r'$')
    return s
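
# Tokenize the prompt, truncating from the left so that it fits in the model's
# 2048-token context window together with the requested number of new tokens.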
def encode(prompt, tokens):
    if not args.cpu:
        torch.cuda.empty_cache()
        input_ids = tokenizer.encode(str(prompt), return_tensors='pt', truncation=True, max_length=2048-tokens).cuda()
    else:
        input_ids = tokenizer.encode(str(prompt), return_tensors='pt', truncation=True, max_length=2048-tokens)
    return input_ids

def decode(output_ids):
    reply = tokenizer.decode(output_ids, skip_special_tokens=True)
    reply = reply.replace(r'<|endoftext|>', '')
    return reply
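
# For the notebook and default interfaces, return the (text, markdown, HTML)
# triple expected by the three output tabs; in chat mode, the reply is passed
# through unchanged and rendered elsewhere.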
def formatted_outputs(reply, model_name):
    if not (args.chat or args.cai_chat):
        if model_name.lower().startswith('galactica'):
            reply = fix_galactica(reply)
            return reply, reply, generate_basic_html(reply)
        elif model_name.lower().startswith('gpt4chan'):
            reply = fix_gpt4chan(reply)
            return reply, 'Only applicable for GALACTICA models.', generate_4chan_html(reply)
        else:
            return reply, 'Only applicable for GALACTICA models.', generate_basic_html(reply)
    else:
        return reply
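
# Generation presets are text files containing comma-separated keyword arguments
# for model.generate() (including 'max_new_tokens=tokens', which refers to the
# local variable below); they are spliced into the generate() call via eval().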
def generate_reply(question, tokens, inference_settings, selected_model, eos_token=None):
    global model, tokenizer, model_name, loaded_preset, preset

    if selected_model != model_name:
        model_name = selected_model
        model = None
        tokenizer = None
        if not args.cpu:
            gc.collect()
            torch.cuda.empty_cache()
        model, tokenizer = load_model(model_name)
    if inference_settings != loaded_preset:
        with open(Path(f'presets/{inference_settings}.txt'), 'r') as infile:
            preset = infile.read()
        loaded_preset = inference_settings

    cuda = "" if args.cpu else ".cuda()"
    n = None if eos_token is None else tokenizer.encode(eos_token, return_tensors='pt')[0][-1]

    # Generate the entire reply at once
    if args.no_stream:
        input_ids = encode(question, tokens)
        output = eval(f"model.generate(input_ids, eos_token_id={n}, {preset}){cuda}")
        reply = decode(output[0])
        yield formatted_outputs(reply, model_name)

    # Generate the reply 1 token at a time
    else:
        yield formatted_outputs(question, model_name)
        input_ids = encode(question, 1)
        preset = preset.replace('max_new_tokens=tokens', 'max_new_tokens=1')
        for i in tqdm(range(tokens)):
            output = eval(f"model.generate(input_ids, {preset}){cuda}")
            reply = decode(output[0])
            if eos_token is not None and reply[-1] == eos_token:
                break
            yield formatted_outputs(reply, model_name)
            input_ids = output

# Choosing the default model
if args.model is not None:
    model_name = args.model
else:
    if len(available_models) == 0:
        print("No models are available! Please download at least one.")
        exit(0)
    elif len(available_models) == 1:
        i = 0
    else:
        print("The following models are available:\n")
        for i, model in enumerate(available_models):
            print(f"{i+1}. {model}")
        print(f"\nWhich one do you want to load? 1-{len(available_models)}\n")
        i = int(input())-1
        print()
    model_name = available_models[i]
model, tokenizer = load_model(model_name)

# UI settings
if model_name.lower().startswith('gpt4chan'):
    default_text = settings['prompt_gpt4chan']
else:
    default_text = settings['prompt']
description = "\n\n# Text generation lab\nGenerate text using Large Language Models.\n"
css = ".my-4 {margin-top: 0} .py-6 {padding-top: 2.5rem}"

if args.chat or args.cai_chat:
    history = []
    character = None

    # Normalize newlines in a chat message: double them, collapse runs of three
    # or more into two, and strip surrounding whitespace.
    def clean_chat_message(text):
        text = text.replace('\n', '\n\n')
        text = re.sub(r"\n{3,}", "\n\n", text)
        text = text.strip()
        return text
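
    # Assemble the prompt: the context first, then as many of the most recent
    # exchanges as fit, then the user's new message and "{name2}:" as the cue
    # for the bot's reply, keeping the total under the 2048-token window.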
    def generate_chat_prompt(text, tokens, name1, name2, context):
        text = clean_chat_message(text)
        rows = [f"{context}\n\n"]
        i = len(history)-1
        while i >= 0 and len(encode(''.join(rows), tokens)[0]) < 2048-tokens:
            rows.insert(1, f"{name2}: {history[i][1].strip()}\n")
            rows.insert(1, f"{name1}: {history[i][0].strip()}\n")
            i -= 1
        rows.append(f"{name1}: {text}\n")
        rows.append(f"{name2}:")

        while len(rows) > 3 and len(encode(''.join(rows), tokens)[0]) >= 2048-tokens:
            rows.pop(1)
            rows.pop(1)

        question = ''.join(rows)
        return question
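
    # Stream the chat: on each partial reply, locate the bot's new text by
    # finding the final "{name2}:" cue (matching the count of cues already in
    # the prompt) and slicing everything after it, then cut the reply short if
    # the model begins writing the user's next line.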
    def chatbot_wrapper(text, tokens, inference_settings, selected_model, name1, name2, context, check):
        question = generate_chat_prompt(text, tokens, name1, name2, context)
        history.append(['', ''])
        eos_token = '\n' if check else None
        for reply in generate_reply(question, tokens, inference_settings, selected_model, eos_token=eos_token):
            next_character_found = False

            previous_idx = [m.start() for m in re.finditer(f"\n{name2}:", question)]
            idx = [m.start() for m in re.finditer(f"(^|\n){name2}:", reply)]
            idx = idx[len(previous_idx)-1]
            reply = reply[idx + len(f"\n{name2}:"):]

            if check:
                reply = reply.split('\n')[0].strip()
            else:
                idx = reply.find(f"\n{name1}:")
                if idx != -1:
                    reply = reply[:idx]
                    next_character_found = True
                reply = clean_chat_message(reply)

            history[-1] = [text, reply]
            if next_character_found:
                break

            # Prevent the chat log from flashing if something like "\nYo" is generated
            # just before "\nYou:" is completed
            tmp = f"\n{name1}:"
            next_character_substring_found = False
            for j in range(1, len(tmp)):
                if reply[-j:] == tmp[:j]:
                    next_character_substring_found = True
            if not next_character_substring_found:
                yield history

        yield history
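
    # Like chatbot_wrapper, but renders each intermediate history as
    # Character.AI-style HTML.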
    def cai_chatbot_wrapper(text, tokens, inference_settings, selected_model, name1, name2, context, check):
        for history in chatbot_wrapper(text, tokens, inference_settings, selected_model, name1, name2, context, check):
            yield generate_chat_html(history, name1, name2, character)

    def remove_last_message(name1, name2):
        history.pop()
        if args.cai_chat:
            return generate_chat_html(history, name1, name2, character)
        else:
            return history

    def clear():
        global history
        history = []

    def clear_html():
        return generate_chat_html([], "", "", character)

    def redraw_html(name1, name2):
        global history
        return generate_chat_html(history, name1, name2, character)

    def save_history():
        if not Path('logs').exists():
            Path('logs').mkdir()
        with open(Path('logs/conversation.json'), 'w') as f:
            f.write(json.dumps({'data': history}))
        return Path('logs/conversation.json')

    def load_history(file):
        global history
        history = json.loads(file.decode('utf-8'))['data']
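
    # Load a character from characters/<name>.json. Recognized fields:
    # char_name, char_persona, world_scenario, example_dialogue, char_greeting.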
    def load_character(_character, name1, name2):
        global history, character
        context = ""
        history = []
        if _character != 'None':
            character = _character
            with open(Path(f'characters/{_character}.json'), 'r') as f:
                data = json.loads(f.read())
            name2 = data['char_name']
            if 'char_persona' in data and data['char_persona'] != '':
                context += f"{data['char_name']}'s Persona: {data['char_persona']}\n"
            if 'world_scenario' in data and data['world_scenario'] != '':
                context += f"Scenario: {data['world_scenario']}\n"
            if 'example_dialogue' in data and data['example_dialogue'] != '':
                context += f"{data['example_dialogue']}"
            context = f"{context.strip()}\n<START>"
            if 'char_greeting' in data:
                history = [['', data['char_greeting']]]
        else:
            character = None
            context = settings['context_pygmalion']
            name2 = settings['name2_pygmalion']

        if args.cai_chat:
            return name2, context, generate_chat_html(history, name1, name2, character)
        else:
            return name2, context, history
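
    # Pygmalion models get their own default names, context, and stopping
    # behavior (replies are not cut at the first newline).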
    suffix = '_pygmalion' if 'pygmalion' in model_name.lower() else ''
    context_str = settings[f'context{suffix}']
    name1_str = settings[f'name1{suffix}']
    name2_str = settings[f'name2{suffix}']
    stop_at_newline = settings[f'stop_at_newline{suffix}']

    with gr.Blocks(css=css+r".h-\[40vh\] {height: 66.67vh} .gradio-container {max-width: 800px; margin-left: auto; margin-right: auto}", analytics_enabled=False) as interface:
        if args.cai_chat:
            display1 = gr.HTML(value=generate_chat_html([], "", "", character))
        else:
            display1 = gr.Chatbot()
        textbox = gr.Textbox(lines=2, label='Input')
        btn = gr.Button("Generate")
        with gr.Row():
            btn2 = gr.Button("Clear history")
            stop = gr.Button("Stop")
            btn3 = gr.Button("Remove last message")
        length_slider = gr.Slider(minimum=settings['max_new_tokens_min'], maximum=settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=settings['max_new_tokens'])
        with gr.Row():
            with gr.Column():
                model_menu = gr.Dropdown(choices=available_models, value=model_name, label='Model')
            with gr.Column():
                preset_menu = gr.Dropdown(choices=available_presets, value=settings['preset'], label='Settings preset')
        name1 = gr.Textbox(value=name1_str, lines=1, label='Your name')
        name2 = gr.Textbox(value=name2_str, lines=1, label='Bot\'s name')
        context = gr.Textbox(value=context_str, lines=2, label='Context')
        with gr.Row():
            character_menu = gr.Dropdown(choices=["None"]+available_characters, value="None", label='Character')
        with gr.Row():
            check = gr.Checkbox(value=stop_at_newline, label='Stop generating at new line character?')
        with gr.Row():
            with gr.Column():
                gr.Markdown("Upload chat history")
                upload = gr.File(type='binary')
            with gr.Column():
                gr.Markdown("Download chat history")
                save_btn = gr.Button(value="Click me")
                download = gr.File()

        if args.cai_chat:
            gen_event = btn.click(cai_chatbot_wrapper, [textbox, length_slider, preset_menu, model_menu, name1, name2, context, check], display1, show_progress=args.no_stream, api_name="textgen")
            gen_event2 = textbox.submit(cai_chatbot_wrapper, [textbox, length_slider, preset_menu, model_menu, name1, name2, context, check], display1, show_progress=args.no_stream)
            btn2.click(clear_html, [], display1, show_progress=False)
        else:
            gen_event = btn.click(chatbot_wrapper, [textbox, length_slider, preset_menu, model_menu, name1, name2, context, check], display1, show_progress=args.no_stream, api_name="textgen")
            gen_event2 = textbox.submit(chatbot_wrapper, [textbox, length_slider, preset_menu, model_menu, name1, name2, context, check], display1, show_progress=args.no_stream)
            btn2.click(lambda x: "", display1, display1, show_progress=False)
        btn2.click(clear)
        btn3.click(remove_last_message, [name1, name2], display1, show_progress=False)
        btn.click(lambda x: "", textbox, textbox, show_progress=False)
        textbox.submit(lambda x: "", textbox, textbox, show_progress=False)
        stop.click(None, None, None, cancels=[gen_event, gen_event2])
        save_btn.click(save_history, inputs=[], outputs=[download])
        upload.upload(load_history, [upload], [])
        character_menu.change(load_character, [character_menu, name1, name2], [name2, context, display1])

        if args.cai_chat:
            upload.upload(redraw_html, [name1, name2], [display1])
        else:
            upload.upload(lambda: history, [], [display1])

elif args.notebook:
    with gr.Blocks(css=css, analytics_enabled=False) as interface:
        gr.Markdown(description)
        with gr.Tab('Raw'):
            textbox = gr.Textbox(value=default_text, lines=23)
        with gr.Tab('Markdown'):
            markdown = gr.Markdown()
        with gr.Tab('HTML'):
            html = gr.HTML()
        btn = gr.Button("Generate")
        stop = gr.Button("Stop")
        length_slider = gr.Slider(minimum=settings['max_new_tokens_min'], maximum=settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=settings['max_new_tokens'])
        with gr.Row():
            with gr.Column():
                model_menu = gr.Dropdown(choices=available_models, value=model_name, label='Model')
            with gr.Column():
                preset_menu = gr.Dropdown(choices=available_presets, value=settings['preset'], label='Settings preset')

        gen_event = btn.click(generate_reply, [textbox, length_slider, preset_menu, model_menu], [textbox, markdown, html], show_progress=args.no_stream, api_name="textgen")
        gen_event2 = textbox.submit(generate_reply, [textbox, length_slider, preset_menu, model_menu], [textbox, markdown, html], show_progress=args.no_stream)
        stop.click(None, None, None, cancels=[gen_event, gen_event2])

else:
    with gr.Blocks(css=css, analytics_enabled=False) as interface:
        gr.Markdown(description)
        with gr.Row():
            with gr.Column():
                textbox = gr.Textbox(value=default_text, lines=15, label='Input')
                length_slider = gr.Slider(minimum=settings['max_new_tokens_min'], maximum=settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=settings['max_new_tokens'])
                preset_menu = gr.Dropdown(choices=available_presets, value=settings['preset'], label='Settings preset')
                model_menu = gr.Dropdown(choices=available_models, value=model_name, label='Model')
                btn = gr.Button("Generate")
                with gr.Row():
                    with gr.Column():
                        cont = gr.Button("Continue")
                    with gr.Column():
                        stop = gr.Button("Stop")
            with gr.Column():
                with gr.Tab('Raw'):
                    output_textbox = gr.Textbox(lines=15, label='Output')
                with gr.Tab('Markdown'):
                    markdown = gr.Markdown()
                with gr.Tab('HTML'):
                    html = gr.HTML()

        gen_event = btn.click(generate_reply, [textbox, length_slider, preset_menu, model_menu], [output_textbox, markdown, html], show_progress=args.no_stream, api_name="textgen")
        gen_event2 = textbox.submit(generate_reply, [textbox, length_slider, preset_menu, model_menu], [output_textbox, markdown, html], show_progress=args.no_stream)
        cont_event = cont.click(generate_reply, [output_textbox, length_slider, preset_menu, model_menu], [output_textbox, markdown, html], show_progress=args.no_stream)
        stop.click(None, None, None, cancels=[gen_event, gen_event2, cont_event])
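
# queue() enables the generator-based streaming outputs; the Stop buttons'
# cancels= also relies on the queue being enabled.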
interface.queue()
if args.no_listen:
    interface.launch(share=False)
else:
    interface.launch(share=False, server_name="0.0.0.0")