server.py

import re
import copy  # needed by save_history()'s copy.deepcopy
import time
import glob
from sys import exit
import torch
import argparse
import json
from pathlib import Path
import gradio as gr
import transformers
from html_generator import *
from transformers import AutoTokenizer, AutoModelForCausalLM
import warnings
import gc
from tqdm import tqdm

transformers.logging.set_verbosity_error()

parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, help='Name of the model to load by default.')
parser.add_argument('--notebook', action='store_true', help='Launch the web UI in notebook mode, where the output is written to the same text box as the input.')
parser.add_argument('--chat', action='store_true', help='Launch the web UI in chat mode.')
parser.add_argument('--cai-chat', action='store_true', help='Launch the web UI in chat mode with a style similar to Character.AI\'s. If the file profile.png or profile.jpg exists in the same folder as server.py, this image will be used as the bot\'s profile picture.')
parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text.')
parser.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision.')
parser.add_argument('--auto-devices', action='store_true', help='Automatically split the model across the available GPU(s) and CPU.')
parser.add_argument('--disk', action='store_true', help='If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.')
parser.add_argument('--disk-cache-dir', type=str, help='Directory to save the disk cache to. Defaults to "cache/".')
parser.add_argument('--gpu-memory', type=int, help='Maximum GPU memory in GiB to allocate. This is useful if you get out-of-memory errors while trying to generate text. Must be an integer.')
parser.add_argument('--cpu-memory', type=int, help='Maximum CPU memory in GiB to allocate for offloaded weights. Must be an integer. Defaults to 99 GiB.')
parser.add_argument('--no-stream', action='store_true', help='Don\'t stream the text output in real time. This slightly improves the text generation performance.')
parser.add_argument('--settings', type=str, help='Load the default interface settings from this json file. See settings-template.json for an example.')
parser.add_argument('--listen', action='store_true', help='Make the web UI reachable from your local network.')
parser.add_argument('--share', action='store_true', help='Create a public URL. This is useful for running the web UI on Google Colab or similar.')
args = parser.parse_args()

loaded_preset = None
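
# Discover the models, generation presets, and characters available on disk.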
available_models = sorted(set([item.replace('.pt', '') for item in map(lambda x: str(x.name), list(Path('models/').glob('*')) + list(Path('torch-dumps/').glob('*'))) if not item.endswith('.txt')]), key=str.lower)
available_presets = sorted(set(map(lambda x: '.'.join(str(x.name).split('.')[:-1]), Path('presets').glob('*.txt'))), key=str.lower)
available_characters = sorted(set(map(lambda x: '.'.join(str(x.name).split('.')[:-1]), Path('characters').glob('*.json'))), key=str.lower)
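
# Default interface settings; any of these keys can be overridden with a json
# file passed via --settings.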
settings = {
    'max_new_tokens': 200,
    'max_new_tokens_min': 1,
    'max_new_tokens_max': 2000,
    'preset': 'NovelAI-Sphinx Moth',
    'name1': 'Person 1',
    'name2': 'Person 2',
    'context': 'This is a conversation between two people.',
    'prompt': 'Common sense questions and answers\n\nQuestion: \nFactual answer:',
    'prompt_gpt4chan': '-----\n--- 865467536\nInput text\n--- 865467537\n',
    'stop_at_newline': True,
    'history_size': 8,
    'history_size_min': 0,
    'history_size_max': 64,
    'preset_pygmalion': 'Pygmalion',
    'name1_pygmalion': 'You',
    'name2_pygmalion': 'Kawaii',
    'context_pygmalion': 'This is a conversation between two people.\n<START>',
    'stop_at_newline_pygmalion': False,
}

if args.settings is not None and Path(args.settings).exists():
    with open(Path(args.settings), 'r') as f:
        new_settings = json.load(f)
    for item in new_settings:
        if item in settings:
            settings[item] = new_settings[item]

def load_model(model_name):
    print(f"Loading {model_name}...")
    t0 = time.time()

    # Default settings
    if not (args.cpu or args.load_in_8bit or args.auto_devices or args.disk or args.gpu_memory is not None):
        if Path(f"torch-dumps/{model_name}.pt").exists():
            print("Loading in .pt format...")
            model = torch.load(Path(f"torch-dumps/{model_name}.pt"))
        elif model_name.lower().startswith(('gpt-neo', 'opt-', 'galactica')) and any(size in model_name.lower() for size in ('13b', '20b', '30b')):
            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), device_map='auto', load_in_8bit=True)
        else:
            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.float16).cuda()

    # Custom
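    # The from_pretrained call is assembled as a string so that only the
    # keyword arguments implied by the command-line flags are included, then
    # executed with eval() below.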
    else:
        settings = ["low_cpu_mem_usage=True"]
        command = "AutoModelForCausalLM.from_pretrained"

        if args.cpu:
            settings.append("torch_dtype=torch.float32")
        else:
            settings.append("device_map='auto'")
            if args.gpu_memory is not None:
                if args.cpu_memory is not None:
                    settings.append(f"max_memory={{0: '{args.gpu_memory}GiB', 'cpu': '{args.cpu_memory}GiB'}}")
                else:
                    settings.append(f"max_memory={{0: '{args.gpu_memory}GiB', 'cpu': '99GiB'}}")
            if args.disk:
                if args.disk_cache_dir is not None:
                    settings.append(f"offload_folder='{args.disk_cache_dir}'")
                else:
                    settings.append("offload_folder='cache'")
            if args.load_in_8bit:
                settings.append("load_in_8bit=True")
            else:
                settings.append("torch_dtype=torch.float16")

        settings = ', '.join(set(settings))
        command = f"{command}(Path(f'models/{model_name}'), {settings})"
        model = eval(command)

    # Load the tokenizer. gpt4chan is a GPT-J fine-tune distributed without a
    # tokenizer, so gpt-j-6B's is reused when available.
    if model_name.lower().startswith(('gpt4chan', 'gpt-4chan', '4chan')) and Path(f"models/gpt-j-6B/").exists():
        tokenizer = AutoTokenizer.from_pretrained(Path("models/gpt-j-6B/"))
    else:
        tokenizer = AutoTokenizer.from_pretrained(Path(f"models/{model_name}/"))
    tokenizer.truncation_side = 'left'

    print(f"Loaded the model in {(time.time()-t0):.2f} seconds.")
    return model, tokenizer

# Removes empty replies from gpt4chan outputs
def fix_gpt4chan(s):
    for i in range(10):
        s = re.sub("--- [0-9]*\n>>[0-9]*\n---", "---", s)
        s = re.sub("--- [0-9]*\n *\n---", "---", s)
        s = re.sub("--- [0-9]*\n\n\n---", "---", s)
    return s

# Fix the LaTeX equations in GALACTICA output
def fix_galactica(s):
    s = s.replace(r'\[', r'$')
    s = s.replace(r'\]', r'$')
    s = s.replace(r'\(', r'$')
    s = s.replace(r'\)', r'$')
    s = s.replace(r'$$', r'$')
    return s

def encode(prompt, tokens):
    if not args.cpu:
        torch.cuda.empty_cache()
        input_ids = tokenizer.encode(str(prompt), return_tensors='pt', truncation=True, max_length=2048-tokens).cuda()
    else:
        input_ids = tokenizer.encode(str(prompt), return_tensors='pt', truncation=True, max_length=2048-tokens)
    return input_ids

def decode(output_ids):
    reply = tokenizer.decode(output_ids, skip_special_tokens=True)
    reply = reply.replace(r'<|endoftext|>', '')
    return reply
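
# In the non-chat modes, the reply is routed to three output widgets (raw
# textbox, markdown, and html), with model-specific post-processing for
# GALACTICA and gpt4chan; in the chat modes the raw reply is returned as-is.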
def formatted_outputs(reply, model_name):
    if not (args.chat or args.cai_chat):
        if model_name.lower().startswith('galactica'):
            reply = fix_galactica(reply)
            return reply, reply, generate_basic_html(reply)
        elif model_name.lower().startswith('gpt4chan'):
            reply = fix_gpt4chan(reply)
            return reply, 'Only applicable for GALACTICA models.', generate_4chan_html(reply)
        else:
            return reply, 'Only applicable for GALACTICA models.', generate_basic_html(reply)
    else:
        return reply
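
# generate_reply is a generator so that gradio can stream the output: with
# --no-stream it yields the finished reply once; otherwise it calls
# model.generate() with max_new_tokens=1 in a loop, feeding each output back
# in as the next input and yielding after every new token.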
def generate_reply(question, tokens, inference_settings, selected_model, eos_token=None):
    global model, tokenizer, model_name, loaded_preset, preset

    if selected_model != model_name:
        model_name = selected_model
        model = tokenizer = None
        if not args.cpu:
            gc.collect()
            torch.cuda.empty_cache()
        model, tokenizer = load_model(model_name)
    if inference_settings != loaded_preset:
        with open(Path(f'presets/{inference_settings}.txt'), 'r') as infile:
            preset = infile.read()
        loaded_preset = inference_settings

    cuda = "" if args.cpu else ".cuda()"
    n = None if eos_token is None else tokenizer.encode(eos_token, return_tensors='pt')[0][-1]
    input_ids = encode(question, tokens)

    # Generate the entire reply at once
    if args.no_stream:
        output = eval(f"model.generate(input_ids, eos_token_id={n}, {preset}){cuda}")
        reply = decode(output[0])
        yield formatted_outputs(reply, model_name)

    # Generate the reply one token at a time
    else:
        yield formatted_outputs(question, model_name)
        preset = preset.replace('max_new_tokens=tokens', 'max_new_tokens=1')
        for i in tqdm(range(tokens)):
            output = eval(f"model.generate(input_ids, {preset}){cuda}")
            reply = decode(output[0])
            if eos_token is not None and reply[-1] == eos_token:
                break
            yield formatted_outputs(reply, model_name)
            input_ids = output

# Choosing the default model
if args.model is not None:
    model_name = args.model
else:
    if len(available_models) == 0:
        print("No models are available! Please download at least one.")
        exit(0)
    elif len(available_models) == 1:
        i = 0
    else:
        print("The following models are available:\n")
        for i, model in enumerate(available_models):
            print(f"{i+1}. {model}")
        print(f"\nWhich one do you want to load? 1-{len(available_models)}\n")
        i = int(input())-1
        print()
    model_name = available_models[i]
model, tokenizer = load_model(model_name)

# UI settings
if model_name.lower().startswith('gpt4chan'):
    default_text = settings['prompt_gpt4chan']
else:
    default_text = settings['prompt']

description = f"\n\n# Text generation lab\nGenerate text using Large Language Models.\n"
css = ".my-4 {margin-top: 0} .py-6 {padding-top: 2.5rem}"

if args.chat or args.cai_chat:
    history = []
    character = None

    # This gets the new line characters right.
    def clean_chat_message(text):
        text = text.replace('\n', '\n\n')
        text = re.sub(r"\n{3,}", "\n\n", text)
        text = text.strip()
        return text
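
    # Build the prompt from the context, as much chat history as fits within
    # the 2048-token window (minus the tokens reserved for the reply), and the
    # new message.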
    def generate_chat_prompt(text, tokens, name1, name2, context, history_size):
        text = clean_chat_message(text)

        rows = [f"{context.strip()}\n"]
        i = len(history)-1
        count = 0
        while i >= 0 and len(encode(''.join(rows), tokens)[0]) < 2048-tokens:
            rows.insert(1, f"{name2}: {history[i][1].strip()}\n")
            count += 1
            if not (i == 0 and len(history[i][0]) == 0):
                rows.insert(1, f"{name1}: {history[i][0].strip()}\n")
                count += 1
            i -= 1
            if history_size != 0 and count >= history_size:
                break

        rows.append(f"{name1}: {text}\n")
        rows.append(f"{name2}:")

        while len(rows) > 3 and len(encode(''.join(rows), tokens)[0]) >= 2048-tokens:
            rows.pop(1)
            rows.pop(1)

        question = ''.join(rows)
        question = question.replace('<|BEGIN-VISIBLE-CHAT|>', '')
        return question
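
    # Generate a reply and extract the bot's new message from it: find the
    # first "{name2}:" marker that was not already present in the prompt and
    # cut everything before it.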
    def chatbot_wrapper(text, tokens, inference_settings, selected_model, name1, name2, context, check, history_size):
        question = generate_chat_prompt(text, tokens, name1, name2, context, history_size)
        history.append(['', ''])
        eos_token = '\n' if check else None
        for reply in generate_reply(question, tokens, inference_settings, selected_model, eos_token=eos_token):
            next_character_found = False

            previous_idx = [m.start() for m in re.finditer(f"(^|\n){name2}:", question)]
            idx = [m.start() for m in re.finditer(f"(^|\n){name2}:", reply)]
            idx = idx[len(previous_idx)-1]
            reply = reply[idx + len(f"\n{name2}:"):]

            if check:
                reply = reply.split('\n')[0].strip()
            else:
                idx = reply.find(f"\n{name1}:")
                if idx != -1:
                    reply = reply[:idx]
                    next_character_found = True
                reply = clean_chat_message(reply)

            history[-1] = [text, reply]
            if next_character_found:
                break

            # Prevent the chat log from flashing if something like "\nYo" is
            # generated just before "\nYou:" is completed
            tmp = f"\n{name1}:"
            next_character_substring_found = False
            for j in range(1, len(tmp)):
                if reply[-j:] == tmp[:j]:
                    next_character_substring_found = True
            if not next_character_substring_found:
                yield history

        yield history

    def cai_chatbot_wrapper(text, tokens, inference_settings, selected_model, name1, name2, context, check, history_size):
        for history in chatbot_wrapper(text, tokens, inference_settings, selected_model, name1, name2, context, check, history_size):
            yield generate_chat_html(history, name1, name2, character)

    def remove_last_message(name1, name2):
        history.pop()
        if args.cai_chat:
            return generate_chat_html(history, name1, name2, character)
        else:
            return history

    def clear():
        global history
        history = []

    def clear_html():
        return generate_chat_html([], "", "", character)

    def redraw_html(name1, name2):
        global history
        return generate_chat_html(history, name1, name2, character)

    # Before saving, drop everything before the character greeting (the
    # example dialogue) and strip the <|BEGIN-VISIBLE-CHAT|> marker.
    def save_history():
        _history = copy.deepcopy(history)
        for i in range(len(_history)):
            if '<|BEGIN-VISIBLE-CHAT|>' in history[i][0]:
                _history[i][0] = _history[i][0].replace('<|BEGIN-VISIBLE-CHAT|>', '')
                _history = _history[i:]
                break
        if not Path('logs').exists():
            Path('logs').mkdir()
        with open(Path('logs/conversation.json'), 'w') as f:
            f.write(json.dumps({'data': _history}))
        return Path('logs/conversation.json')

    def load_history(file):
        global history
        history = json.loads(file.decode('utf-8'))['data']
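
    # Parse a character's example dialogue into [user message, bot message]
    # pairs with the same shape as the chat history.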
    def tokenize_example_dialogue(dialogue, name1, name2):
        dialogue = re.sub('<START>', '', dialogue)
        dialogue = re.sub('(\n|^)[Aa]non:', '\\1You:', dialogue)
        idx = [m.start() for m in re.finditer(f"(^|\n)({name1}|{name2}):", dialogue)]

        messages = []
        for i in range(len(idx)-1):
            messages.append(dialogue[idx[i]:idx[i+1]].strip())

        history = []
        entry = ['', '']
        for i in messages:
            if i.startswith(f'{name1}:'):
                entry[0] = i[len(f'{name1}:'):].strip()
            elif i.startswith(f'{name2}:'):
                entry[1] = i[len(f'{name2}:'):].strip()
                if not (len(entry[0]) == 0 and len(entry[1]) == 0):
                    history.append(entry)
                    entry = ['', '']
        return history

    def load_character(_character, name1, name2):
        global history, character
        context = ""
        history = []
        if _character != 'None':
            character = _character
            with open(Path(f'characters/{_character}.json'), 'r') as f:
                data = json.loads(f.read())
            name2 = data['char_name']
            if 'char_persona' in data and data['char_persona'] != '':
                context += f"{data['char_name']}'s Persona: {data['char_persona']}\n"
            if 'world_scenario' in data and data['world_scenario'] != '':
                context += f"Scenario: {data['world_scenario']}\n"
            context = f"{context.strip()}\n<START>\n"
            if 'example_dialogue' in data and data['example_dialogue'] != '':
                history = tokenize_example_dialogue(data['example_dialogue'], name1, name2)
            if 'char_greeting' in data and len(data['char_greeting'].strip()) > 0:
                history += [['<|BEGIN-VISIBLE-CHAT|>', data['char_greeting']]]
            else:
                history += [['<|BEGIN-VISIBLE-CHAT|>', "Hello there!"]]
        else:
            character = None
            context = settings['context_pygmalion']
            name2 = settings['name2_pygmalion']

        if args.cai_chat:
            return name2, context, generate_chat_html(history, name1, name2, character)
        else:
            return name2, context, history
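
    # Pygmalion models get their own default preset, names, and context.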
    suffix = '_pygmalion' if 'pygmalion' in model_name.lower() else ''

    with gr.Blocks(css=css+".h-\[40vh\] {height: 66.67vh} .gradio-container {max-width: 800px; margin-left: auto; margin-right: auto}", analytics_enabled=False) as interface:
        if args.cai_chat:
            display1 = gr.HTML(value=generate_chat_html([], "", "", character))
        else:
            display1 = gr.Chatbot()
        textbox = gr.Textbox(lines=2, label='Input')
        btn = gr.Button("Generate")
        with gr.Row():
            btn2 = gr.Button("Clear history")
            stop = gr.Button("Stop")
            btn3 = gr.Button("Remove last message")
        with gr.Row():
            with gr.Column():
                length_slider = gr.Slider(minimum=settings['max_new_tokens_min'], maximum=settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=settings['max_new_tokens'])
                model_menu = gr.Dropdown(choices=available_models, value=model_name, label='Model')
            with gr.Column():
                history_size_slider = gr.Slider(minimum=settings['history_size_min'], maximum=settings['history_size_max'], step=1, label='Chat history size (0 for no limit)', value=settings['history_size'])
                preset_menu = gr.Dropdown(choices=available_presets, value=settings[f'preset{suffix}'], label='Settings preset')
        name1 = gr.Textbox(value=settings[f'name1{suffix}'], lines=1, label='Your name')
        name2 = gr.Textbox(value=settings[f'name2{suffix}'], lines=1, label='Bot\'s name')
        context = gr.Textbox(value=settings[f'context{suffix}'], lines=2, label='Context')
        with gr.Row():
            character_menu = gr.Dropdown(choices=["None"]+available_characters, value="None", label='Character')
        with gr.Row():
            check = gr.Checkbox(value=settings[f'stop_at_newline{suffix}'], label='Stop generating at new line character?')
        with gr.Row():
            with gr.Column():
                gr.Markdown("Upload chat history")
                upload = gr.File(type='binary')
            with gr.Column():
                gr.Markdown("Download chat history")
                save_btn = gr.Button(value="Click me")
                download = gr.File()

        input_params = [textbox, length_slider, preset_menu, model_menu, name1, name2, context, check, history_size_slider]
        if args.cai_chat:
            gen_event = btn.click(cai_chatbot_wrapper, input_params, display1, show_progress=args.no_stream, api_name="textgen")
            gen_event2 = textbox.submit(cai_chatbot_wrapper, input_params, display1, show_progress=args.no_stream)
            btn2.click(clear_html, [], display1, show_progress=False)
        else:
            gen_event = btn.click(chatbot_wrapper, input_params, display1, show_progress=args.no_stream, api_name="textgen")
            gen_event2 = textbox.submit(chatbot_wrapper, input_params, display1, show_progress=args.no_stream)
            btn2.click(lambda x: "", display1, display1, show_progress=False)
        btn2.click(clear)
        btn3.click(remove_last_message, [name1, name2], display1, show_progress=False)
        btn.click(lambda x: "", textbox, textbox, show_progress=False)
        textbox.submit(lambda x: "", textbox, textbox, show_progress=False)
        stop.click(None, None, None, cancels=[gen_event, gen_event2])
        save_btn.click(save_history, inputs=[], outputs=[download])
        upload.upload(load_history, [upload], [])
        character_menu.change(load_character, [character_menu, name1, name2], [name2, context, display1])
        if args.cai_chat:
            upload.upload(redraw_html, [name1, name2], [display1])
        else:
            upload.upload(lambda: history, [], [display1])

elif args.notebook:
    with gr.Blocks(css=css, analytics_enabled=False) as interface:
        gr.Markdown(description)
        with gr.Tab('Raw'):
            textbox = gr.Textbox(value=default_text, lines=23)
        with gr.Tab('Markdown'):
            markdown = gr.Markdown()
        with gr.Tab('HTML'):
            html = gr.HTML()
        btn = gr.Button("Generate")
        stop = gr.Button("Stop")
        length_slider = gr.Slider(minimum=settings['max_new_tokens_min'], maximum=settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=settings['max_new_tokens'])
        with gr.Row():
            with gr.Column():
                model_menu = gr.Dropdown(choices=available_models, value=model_name, label='Model')
            with gr.Column():
                preset_menu = gr.Dropdown(choices=available_presets, value=settings['preset'], label='Settings preset')

        gen_event = btn.click(generate_reply, [textbox, length_slider, preset_menu, model_menu], [textbox, markdown, html], show_progress=args.no_stream, api_name="textgen")
        gen_event2 = textbox.submit(generate_reply, [textbox, length_slider, preset_menu, model_menu], [textbox, markdown, html], show_progress=args.no_stream)
        stop.click(None, None, None, cancels=[gen_event, gen_event2])

else:
    with gr.Blocks(css=css, analytics_enabled=False) as interface:
        gr.Markdown(description)
        with gr.Row():
            with gr.Column():
                textbox = gr.Textbox(value=default_text, lines=15, label='Input')
                length_slider = gr.Slider(minimum=settings['max_new_tokens_min'], maximum=settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=settings['max_new_tokens'])
                preset_menu = gr.Dropdown(choices=available_presets, value=settings['preset'], label='Settings preset')
                model_menu = gr.Dropdown(choices=available_models, value=model_name, label='Model')
                btn = gr.Button("Generate")
                with gr.Row():
                    with gr.Column():
                        cont = gr.Button("Continue")
                    with gr.Column():
                        stop = gr.Button("Stop")
            with gr.Column():
                with gr.Tab('Raw'):
                    output_textbox = gr.Textbox(lines=15, label='Output')
                with gr.Tab('Markdown'):
                    markdown = gr.Markdown()
                with gr.Tab('HTML'):
                    html = gr.HTML()

        gen_event = btn.click(generate_reply, [textbox, length_slider, preset_menu, model_menu], [output_textbox, markdown, html], show_progress=args.no_stream, api_name="textgen")
        gen_event2 = textbox.submit(generate_reply, [textbox, length_slider, preset_menu, model_menu], [output_textbox, markdown, html], show_progress=args.no_stream)
        cont_event = cont.click(generate_reply, [output_textbox, length_slider, preset_menu, model_menu], [output_textbox, markdown, html], show_progress=args.no_stream)
        stop.click(None, None, None, cancels=[gen_event, gen_event2, cont_event])
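
# queue() is required for the generator outputs to stream and for the Stop
# buttons' cancels= event cancellation to work.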
interface.queue()
if args.listen:
    interface.launch(share=args.share, server_name="0.0.0.0")
else:
    interface.launch(share=args.share)