server.py

import argparse
import re
import time
from pathlib import Path
from sys import exit

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

from html_generator import *
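
# Note: the wildcard import above provides generate_4chan_html(), which is
# used below to render gpt4chan output.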

parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, help='Name of the model to load by default.')
parser.add_argument('--notebook', action='store_true', help='Launch the webui in notebook mode, where the output is written to the same text box as the input.')
parser.add_argument('--chat', action='store_true', help='Launch the webui in chat mode.')
parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text.')
parser.add_argument('--auto-devices', action='store_true', help='Automatically split the model across the available GPU(s) and CPU.')
parser.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision.')
parser.add_argument('--no-listen', action='store_true', help='Make the webui unreachable from your local network.')
args = parser.parse_args()
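
# Discover the available models (folders under models/ and pre-converted
# .pt dumps under torch-dumps/) and the generation presets under presets/.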
loaded_preset = None
available_models = sorted(set(map(lambda x: str(x.name).replace('.pt', ''), list(Path('models/').glob('*')) + list(Path('torch-dumps/').glob('*')))))
available_models = [item for item in available_models if not item.endswith('.txt')]
available_models = sorted(available_models, key=str.lower)
available_presets = sorted(set(map(lambda x: str(x.name).split('.')[0], list(Path('presets').glob('*.txt')))))
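
# Load a model and its tokenizer. Without flags, this prefers a pre-converted
# torch-dumps/*.pt file; otherwise it honors --cpu, --auto-devices and
# --load-in-8bit.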
def load_model(model_name):
    print(f"Loading {model_name}...")
    t0 = time.time()

    # Default settings
    if not (args.cpu or args.auto_devices or args.load_in_8bit):
        if Path(f"torch-dumps/{model_name}.pt").exists():
            print("Loading in .pt format...")
            model = torch.load(Path(f"torch-dumps/{model_name}.pt"))
        elif model_name.lower().startswith(('gpt-neo', 'opt-', 'galactica')) and any(size in model_name.lower() for size in ('13b', '20b', '30b')):
            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), device_map='auto', load_in_8bit=True)
        else:
            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.float16).cuda()

    # Custom settings: assemble the from_pretrained() keyword arguments as
    # strings and evaluate the resulting call below.
    else:
        settings = ["low_cpu_mem_usage=True"]
        cuda = ""
        command = "AutoModelForCausalLM.from_pretrained"

        if args.cpu:
            settings.append("torch_dtype=torch.float32")
        else:
            if args.load_in_8bit:
                settings.append("device_map='auto'")
                settings.append("load_in_8bit=True")
            else:
                settings.append("torch_dtype=torch.float16")
                if args.auto_devices:
                    settings.append("device_map='auto'")
                else:
                    cuda = ".cuda()"

        settings = ', '.join(settings)
        command = f"{command}(Path(f'models/{model_name}'), {settings}){cuda}"
        model = eval(command)

    # Loading the tokenizer
    if model_name.lower().startswith(('gpt4chan', 'gpt-4chan', '4chan')) and Path("models/gpt-j-6B/").exists():
        tokenizer = AutoTokenizer.from_pretrained(Path("models/gpt-j-6B/"))
    else:
        tokenizer = AutoTokenizer.from_pretrained(Path(f"models/{model_name}/"))

    print(f"Loaded the model in {(time.time()-t0):.2f} seconds.")
    return model, tokenizer

# Removes empty replies from gpt4chan outputs
def fix_gpt4chan(s):
    for i in range(10):
        s = re.sub("--- [0-9]*\n>>[0-9]*\n---", "---", s)
        s = re.sub("--- [0-9]*\n *\n---", "---", s)
        s = re.sub("--- [0-9]*\n\n\n---", "---", s)
    return s

# Fix the LaTeX equations in GALACTICA
def fix_galactica(s):
    s = s.replace(r'\[', r'$')
    s = s.replace(r'\]', r'$')
    s = s.replace(r'\(', r'$')
    s = s.replace(r'\)', r'$')
    s = s.replace(r'$$', r'$')
    return s
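
# Wrap the output in minimal styled HTML for the HTML tab.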
def generate_html(s):
    s = '\n'.join([f'<p style="margin-bottom: 20px">{line}</p>' for line in s.split('\n')])
    s = f'<div style="max-width: 600px; margin-left: auto; margin-right: auto; background-color:#eef2ff; color:#0b0f19; padding:3em; font-size:1.2em;">{s}</div>'
    return s
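
# Generate a reply and return it in three forms: raw text, markdown, and HTML.
# The model and the preset are reloaded if the dropdown selections have changed
# since the last call.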
def generate_reply(question, tokens, inference_settings, selected_model, eos_token=None):
    global model, tokenizer, model_name, loaded_preset, preset

    if selected_model != model_name:
        model_name = selected_model
        model = None
        tokenizer = None
        if not args.cpu:
            torch.cuda.empty_cache()
        model, tokenizer = load_model(model_name)
    if inference_settings != loaded_preset:
        with open(Path(f'presets/{inference_settings}.txt'), 'r') as infile:
            preset = infile.read()
        loaded_preset = inference_settings

    if not args.cpu:
        torch.cuda.empty_cache()
        input_ids = tokenizer.encode(str(question), return_tensors='pt').cuda()
        cuda = ".cuda()"
    else:
        input_ids = tokenizer.encode(str(question), return_tensors='pt')
        cuda = ""

    # The preset file contains the generate() parameters as text (it may
    # reference local variables such as `tokens`, the max_new_tokens slider
    # value), so the call is assembled as a string and evaluated.
    if eos_token is None:
        output = eval(f"model.generate(input_ids, {preset}){cuda}")
    else:
        n = tokenizer.encode(eos_token, return_tensors='pt')[0][-1]
        output = eval(f"model.generate(input_ids, eos_token_id={n}, {preset}){cuda}")

    reply = tokenizer.decode(output[0], skip_special_tokens=True)
    reply = reply.replace(r'<|endoftext|>', '')
    if model_name.lower().startswith('galactica'):
        reply = fix_galactica(reply)
        return reply, reply, generate_html(reply)
    elif model_name.lower().startswith('gpt4chan'):
        reply = fix_gpt4chan(reply)
        return reply, 'Only applicable for galactica models.', generate_4chan_html(reply)
    else:
        return reply, 'Only applicable for galactica models.', generate_html(reply)

# Choosing the default model
if args.model is not None:
    model_name = args.model
else:
    if len(available_models) == 0:
        print("No models are available! Please download at least one.")
        exit(0)
    elif len(available_models) == 1:
        i = 0
    else:
        print("The following models are available:\n")
        for i, model in enumerate(available_models):
            print(f"{i+1}. {model}")
        print(f"\nWhich one do you want to load? 1-{len(available_models)}\n")
        i = int(input()) - 1
        print()
    model_name = available_models[i]

model, tokenizer = load_model(model_name)

# UI settings
if model_name.lower().startswith('gpt4chan'):
    default_text = "-----\n--- 865467536\nInput text\n--- 865467537\n"
else:
    default_text = "Common sense questions and answers\n\nQuestion: \nFactual answer:"

description = """
# Text generation lab
Generate text using Large Language Models.
"""
css = ".my-4 {margin-top: 0} .py-6 {padding-top: 2.5rem}"
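
# Three UI modes: notebook (--notebook), chat (--chat), and the default
# two-column input/output layout.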
if args.notebook:
    with gr.Blocks(css=css, analytics_enabled=False) as interface:
        gr.Markdown(description)
        with gr.Tab('Raw'):
            textbox = gr.Textbox(value=default_text, lines=23)
        with gr.Tab('Markdown'):
            markdown = gr.Markdown()
        with gr.Tab('HTML'):
            html = gr.HTML()
        btn = gr.Button("Generate")
        length_slider = gr.Slider(minimum=1, maximum=2000, step=1, label='max_new_tokens', value=200)
        with gr.Row():
            with gr.Column():
                model_menu = gr.Dropdown(choices=available_models, value=model_name, label='Model')
            with gr.Column():
                preset_menu = gr.Dropdown(choices=available_presets, value="NovelAI-Sphinx Moth", label='Settings preset')

        btn.click(generate_reply, [textbox, length_slider, preset_menu, model_menu], [textbox, markdown, html], show_progress=True, api_name="textgen")
        textbox.submit(generate_reply, [textbox, length_slider, preset_menu, model_menu], [textbox, markdown, html], show_progress=True)
elif args.chat:
    history = []
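
    # Build the prompt from the context, the accumulated history, and the new
    # message, then generate a single line of reply (eos_token='\n').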
    def chatbot_wrapper(text, tokens, inference_settings, selected_model, name1, name2, context):
        question = context + '\n\n'
        for i in range(len(history)):
            # The [3:-5] slice strips the '<p>'...'</p>\n' wrapper that
            # gradio's Chatbot appears to add to the stored messages.
            question += f"{name1}: {history[i][0][3:-5].strip()}\n"
            question += f"{name2}: {history[i][1][3:-5].strip()}\n"
        question += f"{name1}: {text.strip()}\n"
        question += f"{name2}:"

        reply = generate_reply(question, tokens, inference_settings, selected_model, eos_token='\n')[0]
        reply = reply[len(question):].split('\n')[0].strip()
        history.append((text, reply))
        return history

    def clear():
        global history
        history = []

    if 'pygmalion' in model_name.lower():
        context_str = "This is a conversation between two people.\n<START>"
        name1_str = "You"
        name2_str = "Kawaii"
    else:
        context_str = "This is a conversation between two people."
        name1_str = "Person 1"
        name2_str = "Person 2"

    with gr.Blocks(css=css + r".h-\[40vh\] {height: 50vh}", analytics_enabled=False) as interface:
        gr.Markdown(description)
        with gr.Row():
            with gr.Column():
                length_slider = gr.Slider(minimum=1, maximum=2000, step=1, label='max_new_tokens', value=200)
                with gr.Row():
                    with gr.Column():
                        model_menu = gr.Dropdown(choices=available_models, value=model_name, label='Model')
                    with gr.Column():
                        preset_menu = gr.Dropdown(choices=available_presets, value="NovelAI-Sphinx Moth", label='Settings preset')
                name1 = gr.Textbox(value=name1_str, lines=1, label='Your name')
                name2 = gr.Textbox(value=name2_str, lines=1, label='Bot\'s name')
                context = gr.Textbox(value=context_str, lines=2, label='Context')
            with gr.Column():
                display1 = gr.Chatbot()
                textbox = gr.Textbox(lines=2, label='Input')
                btn = gr.Button("Generate")
                btn2 = gr.Button("Clear history")

        btn.click(chatbot_wrapper, [textbox, length_slider, preset_menu, model_menu, name1, name2, context], display1, show_progress=True, api_name="textgen")
        textbox.submit(chatbot_wrapper, [textbox, length_slider, preset_menu, model_menu, name1, name2, context], display1, show_progress=True)
        btn2.click(clear)
        btn.click(lambda x: "", textbox, textbox, show_progress=False)
        textbox.submit(lambda x: "", textbox, textbox, show_progress=False)
        btn2.click(lambda x: "", display1, display1)
else:
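    # "Continue" feeds the current output back in as the input, so generation
    # picks up where the previous reply stopped.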
    def continue_wrapper(question, tokens, inference_settings, selected_model):
        a, b, c = generate_reply(question, tokens, inference_settings, selected_model)
        return a, a, b, c

    with gr.Blocks(css=css, analytics_enabled=False) as interface:
        gr.Markdown(description)
        with gr.Row():
            with gr.Column():
                textbox = gr.Textbox(value=default_text, lines=15, label='Input')
                length_slider = gr.Slider(minimum=1, maximum=2000, step=1, label='max_new_tokens', value=200)
                preset_menu = gr.Dropdown(choices=available_presets, value="NovelAI-Sphinx Moth", label='Settings preset')
                model_menu = gr.Dropdown(choices=available_models, value=model_name, label='Model')
                btn = gr.Button("Generate")
                cont = gr.Button("Continue")
            with gr.Column():
                with gr.Tab('Raw'):
                    output_textbox = gr.Textbox(lines=15, label='Output')
                with gr.Tab('Markdown'):
                    markdown = gr.Markdown()
                with gr.Tab('HTML'):
                    html = gr.HTML()

        btn.click(generate_reply, [textbox, length_slider, preset_menu, model_menu], [output_textbox, markdown, html], show_progress=True, api_name="textgen")
        cont.click(continue_wrapper, [output_textbox, length_slider, preset_menu, model_menu], [output_textbox, textbox, markdown, html], show_progress=True)
        textbox.submit(generate_reply, [textbox, length_slider, preset_menu, model_menu], [output_textbox, markdown, html], show_progress=True)
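
# Listen on all network interfaces unless --no-listen was given.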
if args.no_listen:
    interface.launch(share=False)
else:
    interface.launch(share=False, server_name="0.0.0.0")