server.py

import os

os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False'

import io
import json
import re
import sys
import time
import zipfile
from datetime import datetime
from pathlib import Path

import gradio as gr
from PIL import Image

import modules.extensions as extensions_module
from modules import chat, shared, training, ui, api
from modules.html_generator import chat_html_wrapper
from modules.LoRA import add_lora_to_model
from modules.models import load_model, load_soft_prompt
from modules.text_generation import (clear_torch_cache, generate_reply,
                                     stop_everything_event)

# Loading custom settings
settings_file = None
if shared.args.settings is not None and Path(shared.args.settings).exists():
    settings_file = Path(shared.args.settings)
elif Path('settings.json').exists():
    settings_file = Path('settings.json')
if settings_file is not None:
    print(f"Loading settings from {settings_file}...")
    new_settings = json.loads(open(settings_file, 'r').read())
    for item in new_settings:
        shared.settings[item] = new_settings[item]

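# Helpers that scan the filesystem for available resources: each one lists a
# conventional directory (presets/, prompts/, characters/, extensions/,
# softprompts/, or the configurable model and LoRA directories) and returns
# sorted names for the corresponding dropdown menu.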
def get_available_models():
    if shared.args.flexgen:
        return sorted([re.sub('-np$', '', item.name) for item in list(Path(f'{shared.args.model_dir}/').glob('*')) if item.name.endswith('-np')], key=str.lower)
    else:
        return sorted([re.sub(r'\.pth$', '', item.name) for item in list(Path(f'{shared.args.model_dir}/').glob('*')) if not item.name.endswith(('.txt', '-np', '.pt', '.json'))], key=str.lower)


def get_available_presets():
    return sorted(set((k.stem for k in Path('presets').glob('*.txt'))), key=str.lower)


def get_available_prompts():
    prompts = []
    prompts += sorted(set((k.stem for k in Path('prompts').glob('[0-9]*.txt'))), key=str.lower, reverse=True)
    prompts += sorted(set((k.stem for k in Path('prompts').glob('*.txt'))), key=str.lower)
    prompts += ['None']
    return prompts


def get_available_characters():
    paths = (x for x in Path('characters').iterdir() if x.suffix in ('.json', '.yaml', '.yml'))
    return ['None'] + sorted(set((k.stem for k in paths if k.stem != "instruction-following")), key=str.lower)


def get_available_instruction_templates():
    path = "characters/instruction-following"
    paths = []
    if os.path.exists(path):
        paths = (x for x in Path(path).iterdir() if x.suffix in ('.json', '.yaml', '.yml'))
    return ['None'] + sorted(set((k.stem for k in paths)), key=str.lower)


def get_available_extensions():
    return sorted(set(map(lambda x: x.parts[1], Path('extensions').glob('*/script.py'))), key=str.lower)


def get_available_softprompts():
    return ['None'] + sorted(set((k.stem for k in Path('softprompts').glob('*.zip'))), key=str.lower)


def get_available_loras():
    return ['None'] + sorted([item.name for item in list(Path(shared.args.lora_dir).glob('*')) if not item.name.endswith(('.txt', '-np', '.pt', '.json'))], key=str.lower)

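# Model management: unloading drops the references and clears the torch cache;
# the wrappers below back the dropdowns in the Model tab.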
def unload_model():
    shared.model = shared.tokenizer = None
    clear_torch_cache()


def load_model_wrapper(selected_model):
    if selected_model != shared.model_name:
        shared.model_name = selected_model
        unload_model()
        if selected_model != '':
            shared.model, shared.tokenizer = load_model(shared.model_name)
    return selected_model


def load_lora_wrapper(selected_lora):
    add_lora_to_model(selected_lora)
    return selected_lora

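# Presets are plain-text files in presets/ consisting of "key=value" lines
# (a trailing comma per line is tolerated), e.g.:
#     temperature=0.7
#     top_p=0.9
# Values are passed through eval(), so presets should come from trusted sources.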
def load_preset_values(preset_menu, state, return_dict=False):
    generate_params = {
        'do_sample': True,
        'temperature': 1,
        'top_p': 1,
        'typical_p': 1,
        'repetition_penalty': 1,
        'encoder_repetition_penalty': 1,
        'top_k': 50,
        'num_beams': 1,
        'penalty_alpha': 0,
        'min_length': 0,
        'length_penalty': 1,
        'no_repeat_ngram_size': 0,
        'early_stopping': False,
    }
    with open(Path(f'presets/{preset_menu}.txt'), 'r') as infile:
        preset = infile.read()
    for i in preset.splitlines():
        i = i.rstrip(',').strip().split('=')
        if len(i) == 2 and i[0].strip() != 'tokens':
            generate_params[i[0].strip()] = eval(i[1].strip())
    generate_params['temperature'] = min(1.99, generate_params['temperature'])

    if return_dict:
        return generate_params
    else:
        state.update(generate_params)
        return state, *[generate_params[k] for k in ['do_sample', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping']]

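# A soft prompt is a .zip archive containing a meta.json with a "name" field;
# the uploaded file is saved as softprompts/<name>.zip.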
def upload_soft_prompt(file):
    with zipfile.ZipFile(io.BytesIO(file)) as zf:
        zf.extract('meta.json')
        j = json.loads(open('meta.json', 'r').read())
        name = j['name']
        Path('meta.json').unlink()

    with open(Path(f'softprompts/{name}.zip'), 'wb') as f:
        f.write(file)

    return name


def save_prompt(text):
    fname = f"{datetime.now().strftime('%Y-%m-%d-%H%M%S')}.txt"
    with open(Path(f'prompts/{fname}'), 'w', encoding='utf-8') as f:
        f.write(text)
    return f"Saved to prompts/{fname}"


def load_prompt(fname):
    if fname in ['None', '']:
        return ''
    else:
        with open(Path(f'prompts/{fname}.txt'), 'r', encoding='utf-8') as f:
            text = f.read()
            if text.endswith('\n'):
                text = text[:-1]
            return text

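# Reusable UI builders: prompt selection, model/LoRA selection, and the
# generation settings panel.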
def create_prompt_menus():
    with gr.Row():
        with gr.Column():
            with gr.Row():
                shared.gradio['prompt_menu'] = gr.Dropdown(choices=get_available_prompts(), value='None', label='Prompt')
                ui.create_refresh_button(shared.gradio['prompt_menu'], lambda: None, lambda: {'choices': get_available_prompts()}, 'refresh-button')

        with gr.Column():
            with gr.Column():
                shared.gradio['save_prompt'] = gr.Button('Save prompt')
                shared.gradio['status'] = gr.Markdown('Ready')

    shared.gradio['prompt_menu'].change(load_prompt, [shared.gradio['prompt_menu']], [shared.gradio['textbox']], show_progress=False)
    shared.gradio['save_prompt'].click(save_prompt, [shared.gradio['textbox']], [shared.gradio['status']], show_progress=False)

def create_model_menus():
    with gr.Row():
        with gr.Column():
            with gr.Row():
                shared.gradio['model_menu'] = gr.Dropdown(choices=available_models, value=shared.model_name, label='Model')
                ui.create_refresh_button(shared.gradio['model_menu'], lambda: None, lambda: {'choices': get_available_models()}, 'refresh-button')
        with gr.Column():
            with gr.Row():
                shared.gradio['lora_menu'] = gr.Dropdown(choices=available_loras, value=shared.lora_name, label='LoRA')
                ui.create_refresh_button(shared.gradio['lora_menu'], lambda: None, lambda: {'choices': get_available_loras()}, 'refresh-button')

    shared.gradio['model_menu'].change(load_model_wrapper, shared.gradio['model_menu'], shared.gradio['model_menu'], show_progress=True)
    shared.gradio['lora_menu'].change(load_lora_wrapper, shared.gradio['lora_menu'], shared.gradio['lora_menu'], show_progress=True)

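# Builds the generation-parameters panel. Current values live in one gr.State
# dict ('generate_state') that the event handlers pass to the generation
# functions; the individual widgets write back into it (see change_dict_value
# further down).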
def create_settings_menus(default_preset):
    generate_params = load_preset_values(default_preset if not shared.args.flexgen else 'Naive', {}, return_dict=True)
    for k in ['max_new_tokens', 'seed', 'stop_at_newline', 'chat_prompt_size', 'chat_generation_attempts']:
        generate_params[k] = shared.settings[k]
    shared.gradio['generate_state'] = gr.State(generate_params)

    with gr.Row():
        with gr.Column():
            with gr.Row():
                shared.gradio['preset_menu'] = gr.Dropdown(choices=available_presets, value=default_preset if not shared.args.flexgen else 'Naive', label='Generation parameters preset')
                ui.create_refresh_button(shared.gradio['preset_menu'], lambda: None, lambda: {'choices': get_available_presets()}, 'refresh-button')
        with gr.Column():
            shared.gradio['seed'] = gr.Number(value=shared.settings['seed'], label='Seed (-1 for random)')

    with gr.Row():
        with gr.Column():
            with gr.Box():
                gr.Markdown('Custom generation parameters ([reference](https://huggingface.co/docs/transformers/main_classes/text_generation#transformers.GenerationConfig))')
                with gr.Row():
                    with gr.Column():
                        shared.gradio['temperature'] = gr.Slider(0.01, 1.99, value=generate_params['temperature'], step=0.01, label='temperature')
                        shared.gradio['top_p'] = gr.Slider(0.0, 1.0, value=generate_params['top_p'], step=0.01, label='top_p')
                        shared.gradio['top_k'] = gr.Slider(0, 200, value=generate_params['top_k'], step=1, label='top_k')
                        shared.gradio['typical_p'] = gr.Slider(0.0, 1.0, value=generate_params['typical_p'], step=0.01, label='typical_p')
                    with gr.Column():
                        shared.gradio['repetition_penalty'] = gr.Slider(1.0, 1.5, value=generate_params['repetition_penalty'], step=0.01, label='repetition_penalty')
                        shared.gradio['encoder_repetition_penalty'] = gr.Slider(0.8, 1.5, value=generate_params['encoder_repetition_penalty'], step=0.01, label='encoder_repetition_penalty')
                        shared.gradio['no_repeat_ngram_size'] = gr.Slider(0, 20, step=1, value=generate_params['no_repeat_ngram_size'], label='no_repeat_ngram_size')
                        shared.gradio['min_length'] = gr.Slider(0, 2000, step=1, value=generate_params['min_length'] if shared.args.no_stream else 0, label='min_length', interactive=shared.args.no_stream)
                        shared.gradio['do_sample'] = gr.Checkbox(value=generate_params['do_sample'], label='do_sample')
        with gr.Column():
            with gr.Box():
                gr.Markdown('Contrastive search')
                shared.gradio['penalty_alpha'] = gr.Slider(0, 5, value=generate_params['penalty_alpha'], label='penalty_alpha')
            with gr.Box():
                gr.Markdown('Beam search (uses a lot of VRAM)')
                with gr.Row():
                    with gr.Column():
                        shared.gradio['num_beams'] = gr.Slider(1, 20, step=1, value=generate_params['num_beams'], label='num_beams')
                    with gr.Column():
                        shared.gradio['length_penalty'] = gr.Slider(-5, 5, value=generate_params['length_penalty'], label='length_penalty')
                shared.gradio['early_stopping'] = gr.Checkbox(value=generate_params['early_stopping'], label='early_stopping')

    with gr.Accordion('Soft prompt', open=False):
        with gr.Row():
            shared.gradio['softprompts_menu'] = gr.Dropdown(choices=available_softprompts, value='None', label='Soft prompt')
            ui.create_refresh_button(shared.gradio['softprompts_menu'], lambda: None, lambda: {'choices': get_available_softprompts()}, 'refresh-button')

        gr.Markdown('Upload a soft prompt (.zip format):')
        with gr.Row():
            shared.gradio['upload_softprompt'] = gr.File(type='binary', file_types=['.zip'])

    shared.gradio['preset_menu'].change(load_preset_values, [shared.gradio[k] for k in ['preset_menu', 'generate_state']], [shared.gradio[k] for k in ['generate_state', 'do_sample', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping']])
    shared.gradio['softprompts_menu'].change(load_soft_prompt, shared.gradio['softprompts_menu'], shared.gradio['softprompts_menu'], show_progress=True)
    shared.gradio['upload_softprompt'].upload(upload_soft_prompt, shared.gradio['upload_softprompt'], shared.gradio['softprompts_menu'])

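# Applies the selections made in the "Interface mode" tab by rewriting
# shared.args in place, then flags the main loop to rebuild the interface.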
def set_interface_arguments(interface_mode, extensions, bool_active):
    modes = ["default", "notebook", "chat", "cai_chat"]
    cmd_list = vars(shared.args)
    bool_list = [k for k in cmd_list if type(cmd_list[k]) is bool and k not in modes]

    shared.args.extensions = extensions
    for k in modes[1:]:
        exec(f"shared.args.{k} = False")
    if interface_mode != "default":
        exec(f"shared.args.{interface_mode} = True")

    for k in bool_list:
        exec(f"shared.args.{k} = False")
    for k in bool_active:
        exec(f"shared.args.{k} = True")

    shared.need_restart = True

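# One-time startup scan; the refresh buttons in the UI re-scan on demand.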
available_models = get_available_models()
available_presets = get_available_presets()
available_characters = get_available_characters()
available_softprompts = get_available_softprompts()
available_loras = get_available_loras()

# Default extensions
extensions_module.available_extensions = get_available_extensions()
if shared.is_chat():
    for extension in shared.settings['chat_default_extensions']:
        shared.args.extensions = shared.args.extensions or []
        if extension not in shared.args.extensions:
            shared.args.extensions.append(extension)
else:
    for extension in shared.settings['default_extensions']:
        shared.args.extensions = shared.args.extensions or []
        if extension not in shared.args.extensions:
            shared.args.extensions.append(extension)

# Default model
if shared.args.model is not None:
    shared.model_name = shared.args.model
else:
    if len(available_models) == 0:
        print('No models are available! Please download at least one.')
        sys.exit(0)
    elif len(available_models) == 1:
        i = 0
    else:
        print('The following models are available:\n')
        for i, model in enumerate(available_models):
            print(f'{i+1}. {model}')
        print(f'\nWhich one do you want to load? 1-{len(available_models)}\n')
        i = int(input()) - 1
        print()
    shared.model_name = available_models[i]

shared.model, shared.tokenizer = load_model(shared.model_name)
if shared.args.lora:
    add_lora_to_model(shared.args.lora)

# Default UI settings
default_preset = shared.settings['presets'][next((k for k in shared.settings['presets'] if re.match(k.lower(), shared.model_name.lower())), 'default')]
if shared.lora_name != "None":
    default_text = load_prompt(shared.settings['lora_prompts'][next((k for k in shared.settings['lora_prompts'] if re.match(k.lower(), shared.lora_name.lower())), 'default')])
else:
    default_text = load_prompt(shared.settings['prompts'][next((k for k in shared.settings['prompts'] if re.match(k.lower(), shared.model_name.lower())), 'default')])
title = 'Text generation web UI'

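# Builds the entire Gradio app. Exactly one of three main layouts is created:
# chat (--chat or --cai-chat), notebook (--notebook), or the default
# input/output layout, followed by the Model, Training, and Interface mode tabs.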
def create_interface():
    gen_events = []
    if shared.args.extensions is not None and len(shared.args.extensions) > 0:
        extensions_module.load_extensions()

    with gr.Blocks(css=ui.css if not shared.is_chat() else ui.css + ui.chat_css, analytics_enabled=False, title=title) as shared.gradio['interface']:
        if shared.is_chat():
            shared.gradio['Chat input'] = gr.State()
            with gr.Tab("Text generation", elem_id="main"):
                shared.gradio['display'] = gr.HTML(value=chat_html_wrapper(shared.history['visible'], shared.settings['name1'], shared.settings['name2'], 'cai-chat'))
                shared.gradio['textbox'] = gr.Textbox(label='Input')
                with gr.Row():
                    shared.gradio['Generate'] = gr.Button('Generate')
                    shared.gradio['Stop'] = gr.Button('Stop', elem_id="stop")

                with gr.Row():
                    shared.gradio['Impersonate'] = gr.Button('Impersonate')
                    shared.gradio['Regenerate'] = gr.Button('Regenerate')

                with gr.Row():
                    shared.gradio['Copy last reply'] = gr.Button('Copy last reply')
                    shared.gradio['Replace last reply'] = gr.Button('Replace last reply')
                    shared.gradio['Remove last'] = gr.Button('Remove last')

                    shared.gradio['Clear history'] = gr.Button('Clear history')
                    shared.gradio['Clear history-confirm'] = gr.Button('Confirm', variant="stop", visible=False)
                    shared.gradio['Clear history-cancel'] = gr.Button('Cancel', visible=False)

                shared.gradio["Chat mode"] = gr.Radio(choices=["cai-chat", "chat", "instruct"], value="cai-chat", label="Mode")
                shared.gradio["Instruction templates"] = gr.Dropdown(choices=get_available_instruction_templates(), label="Instruction template", value="None", visible=False)

            with gr.Tab("Character", elem_id="chat-settings"):
                with gr.Row():
                    with gr.Column(scale=8):
                        shared.gradio['name1'] = gr.Textbox(value=shared.settings['name1'], lines=1, label='Your name')
                        shared.gradio['name2'] = gr.Textbox(value=shared.settings['name2'], lines=1, label='Character\'s name')
                        shared.gradio['greeting'] = gr.Textbox(value=shared.settings['greeting'], lines=4, label='Greeting')
                        shared.gradio['context'] = gr.Textbox(value=shared.settings['context'], lines=4, label='Context')
                        shared.gradio['end_of_turn'] = gr.Textbox(value=shared.settings["end_of_turn"], lines=1, label='End of turn string')
                    with gr.Column(scale=1):
                        shared.gradio['character_picture'] = gr.Image(label='Character picture', type="pil")
                        shared.gradio['your_picture'] = gr.Image(label='Your picture', type="pil", value=Image.open(Path("cache/pfp_me.png")) if Path("cache/pfp_me.png").exists() else None)

                with gr.Row():
                    shared.gradio['character_menu'] = gr.Dropdown(choices=available_characters, value='None', label='Character', elem_id='character-menu')
                    ui.create_refresh_button(shared.gradio['character_menu'], lambda: None, lambda: {'choices': get_available_characters()}, 'refresh-button')

                with gr.Row():
                    with gr.Tab('Chat history'):
                        with gr.Row():
                            with gr.Column():
                                gr.Markdown('Upload')
                                shared.gradio['upload_chat_history'] = gr.File(type='binary', file_types=['.json', '.txt'])
                            with gr.Column():
                                gr.Markdown('Download')
                                shared.gradio['download'] = gr.File()
                                shared.gradio['download_button'] = gr.Button(value='Click me')
                    with gr.Tab('Upload character'):
                        gr.Markdown("# JSON format")
                        with gr.Row():
                            with gr.Column():
                                gr.Markdown('1. Select the JSON file')
                                shared.gradio['upload_json'] = gr.File(type='binary', file_types=['.json'])
                            with gr.Column():
                                gr.Markdown('2. Select your character\'s profile picture (optional)')
                                shared.gradio['upload_img_bot'] = gr.File(type='binary', file_types=['image'])
                        shared.gradio['Upload character'] = gr.Button(value='Submit')

                        gr.Markdown("# TavernAI PNG format")
                        shared.gradio['upload_img_tavern'] = gr.File(type='binary', file_types=['image'])

            with gr.Tab("Parameters", elem_id="parameters"):
                with gr.Box():
                    gr.Markdown("Chat parameters")
                    with gr.Row():
                        with gr.Column():
                            shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
                            shared.gradio['chat_prompt_size_slider'] = gr.Slider(minimum=shared.settings['chat_prompt_size_min'], maximum=shared.settings['chat_prompt_size_max'], step=1, label='Maximum prompt size in tokens', value=shared.settings['chat_prompt_size'])
                        with gr.Column():
                            shared.gradio['chat_generation_attempts'] = gr.Slider(minimum=shared.settings['chat_generation_attempts_min'], maximum=shared.settings['chat_generation_attempts_max'], value=shared.settings['chat_generation_attempts'], step=1, label='Generation attempts (for longer replies)')
                            shared.gradio['stop_at_newline'] = gr.Checkbox(value=shared.settings['stop_at_newline'], label='Stop generating at new line character?')

                create_settings_menus(default_preset)

            shared.input_params = [shared.gradio[k] for k in ['Chat input', 'generate_state', 'name1', 'name2', 'context', 'Chat mode', 'end_of_turn']]
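            # set_chat_input copies the textbox into the 'Chat input' state and
            # returns "" for the textbox itself, so the visible input clears
            # immediately while the chat wrappers still receive the submitted text.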
            def set_chat_input(textbox):
                return textbox, ""

            gen_events.append(shared.gradio['Generate'].click(set_chat_input, shared.gradio['textbox'], [shared.gradio['Chat input'], shared.gradio['textbox']], show_progress=False))
            gen_events.append(shared.gradio['Generate'].click(chat.cai_chatbot_wrapper, shared.input_params, shared.gradio['display'], show_progress=shared.args.no_stream))
            gen_events.append(shared.gradio['textbox'].submit(set_chat_input, shared.gradio['textbox'], [shared.gradio['Chat input'], shared.gradio['textbox']], show_progress=False))
            gen_events.append(shared.gradio['textbox'].submit(chat.cai_chatbot_wrapper, shared.input_params, shared.gradio['display'], show_progress=shared.args.no_stream))
            gen_events.append(shared.gradio['Regenerate'].click(chat.regenerate_wrapper, shared.input_params, shared.gradio['display'], show_progress=shared.args.no_stream))
            gen_events.append(shared.gradio['Impersonate'].click(chat.impersonate_wrapper, shared.input_params, shared.gradio['textbox'], show_progress=shared.args.no_stream))
            shared.gradio['Stop'].click(stop_everything_event, [], [], queue=False, cancels=gen_events if shared.args.no_stream else None)

            shared.gradio['Copy last reply'].click(chat.send_last_reply_to_input, [], shared.gradio['textbox'], show_progress=shared.args.no_stream)
            shared.gradio['Replace last reply'].click(chat.replace_last_reply, [shared.gradio[k] for k in ['textbox', 'name1', 'name2', 'Chat mode']], shared.gradio['display'], show_progress=shared.args.no_stream)

            # Clear history with confirmation
            clear_arr = [shared.gradio[k] for k in ['Clear history-confirm', 'Clear history', 'Clear history-cancel']]
            shared.gradio['Clear history'].click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, clear_arr)
            shared.gradio['Clear history-confirm'].click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, clear_arr)
            shared.gradio['Clear history-confirm'].click(chat.clear_chat_log, [shared.gradio[k] for k in ['name1', 'name2', 'greeting', 'Chat mode']], shared.gradio['display'])
            shared.gradio['Clear history-cancel'].click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, clear_arr)
            shared.gradio['Chat mode'].change(lambda x: gr.update(visible=x == 'instruct'), shared.gradio['Chat mode'], shared.gradio['Instruction templates'])

            shared.gradio['Remove last'].click(chat.remove_last_message, [shared.gradio[k] for k in ['name1', 'name2', 'Chat mode']], [shared.gradio['display'], shared.gradio['textbox']], show_progress=False)
            shared.gradio['download_button'].click(chat.save_history, inputs=[], outputs=[shared.gradio['download']])
            shared.gradio['Upload character'].click(chat.upload_character, [shared.gradio['upload_json'], shared.gradio['upload_img_bot']], [shared.gradio['character_menu']])

            # Clearing stuff and saving the history
            for i in ['Generate', 'Regenerate', 'Replace last reply']:
                shared.gradio[i].click(lambda x: '', shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False)
                shared.gradio[i].click(lambda: chat.save_history(timestamp=False), [], [], show_progress=False)
            shared.gradio['Clear history-confirm'].click(lambda: chat.save_history(timestamp=False), [], [], show_progress=False)
            shared.gradio['textbox'].submit(lambda x: '', shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False)
            shared.gradio['textbox'].submit(lambda: chat.save_history(timestamp=False), [], [], show_progress=False)

            shared.gradio['character_menu'].change(chat.load_character, [shared.gradio[k] for k in ['character_menu', 'name1', 'name2', 'Chat mode']], [shared.gradio[k] for k in ['name1', 'name2', 'character_picture', 'greeting', 'context', 'end_of_turn', 'display']])
            shared.gradio['Instruction templates'].change(lambda character, name1, name2, mode: chat.load_character(character, name1, name2, mode), [shared.gradio[k] for k in ['Instruction templates', 'name1', 'name2', 'Chat mode']], [shared.gradio[k] for k in ['name1', 'name2', 'character_picture', 'greeting', 'context', 'end_of_turn', 'display']])
            shared.gradio['upload_chat_history'].upload(chat.load_history, [shared.gradio[k] for k in ['upload_chat_history', 'name1', 'name2']], [])
            shared.gradio['upload_img_tavern'].upload(chat.upload_tavern_character, [shared.gradio['upload_img_tavern'], shared.gradio['name1'], shared.gradio['name2']], [shared.gradio['character_menu']])
            shared.gradio['your_picture'].change(chat.upload_your_profile_picture, [shared.gradio[k] for k in ['your_picture', 'name1', 'name2', 'Chat mode']], shared.gradio['display'])

            reload_inputs = [shared.gradio[k] for k in ['name1', 'name2', 'Chat mode']]
            shared.gradio['upload_chat_history'].upload(chat.redraw_html, reload_inputs, [shared.gradio['display']])
            shared.gradio['Stop'].click(chat.redraw_html, reload_inputs, [shared.gradio['display']])
            shared.gradio['Instruction templates'].change(chat.redraw_html, reload_inputs, [shared.gradio['display']])
            shared.gradio['Chat mode'].change(chat.redraw_html, reload_inputs, [shared.gradio['display']])

            shared.gradio['interface'].load(None, None, None, _js=f"() => {{{ui.main_js+ui.chat_js}}}")
            shared.gradio['interface'].load(lambda: chat.load_default_history(shared.settings['name1'], shared.settings['name2']), None, None)
            shared.gradio['interface'].load(chat.redraw_html, reload_inputs, [shared.gradio['display']], show_progress=True)

        elif shared.args.notebook:
            with gr.Tab("Text generation", elem_id="main"):
                with gr.Row():
                    with gr.Column(scale=4):
                        with gr.Tab('Raw'):
                            shared.gradio['textbox'] = gr.Textbox(value=default_text, elem_id="textbox", lines=27)
                        with gr.Tab('Markdown'):
                            shared.gradio['markdown'] = gr.Markdown()
                        with gr.Tab('HTML'):
                            shared.gradio['html'] = gr.HTML()

                        with gr.Row():
                            with gr.Column():
                                with gr.Row():
                                    shared.gradio['Generate'] = gr.Button('Generate')
                                    shared.gradio['Stop'] = gr.Button('Stop')
                            with gr.Column():
                                pass

                    with gr.Column(scale=1):
                        gr.HTML('<div style="padding-bottom: 13px"></div>')
                        shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])

                create_prompt_menus()

            with gr.Tab("Parameters", elem_id="parameters"):
                create_settings_menus(default_preset)

            shared.input_params = [shared.gradio[k] for k in ['textbox', 'generate_state']]
            output_params = [shared.gradio[k] for k in ['textbox', 'markdown', 'html']]
            gen_events.append(shared.gradio['Generate'].click(generate_reply, shared.input_params, output_params, show_progress=shared.args.no_stream))
            gen_events.append(shared.gradio['textbox'].submit(generate_reply, shared.input_params, output_params, show_progress=shared.args.no_stream))
            shared.gradio['Stop'].click(stop_everything_event, [], [], queue=False, cancels=gen_events if shared.args.no_stream else None)
            shared.gradio['interface'].load(None, None, None, _js=f"() => {{{ui.main_js}}}")

        else:
            with gr.Tab("Text generation", elem_id="main"):
                with gr.Row():
                    with gr.Column():
                        shared.gradio['textbox'] = gr.Textbox(value=default_text, lines=21, label='Input')
                        shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
                        shared.gradio['Generate'] = gr.Button('Generate')
                        with gr.Row():
                            with gr.Column():
                                shared.gradio['Continue'] = gr.Button('Continue')
                            with gr.Column():
                                shared.gradio['Stop'] = gr.Button('Stop')

                        create_prompt_menus()

                    with gr.Column():
                        with gr.Tab('Raw'):
                            shared.gradio['output_textbox'] = gr.Textbox(lines=27, label='Output')
                        with gr.Tab('Markdown'):
                            shared.gradio['markdown'] = gr.Markdown()
                        with gr.Tab('HTML'):
                            shared.gradio['html'] = gr.HTML()

            with gr.Tab("Parameters", elem_id="parameters"):
                create_settings_menus(default_preset)

            shared.input_params = [shared.gradio[k] for k in ['textbox', 'generate_state']]
            output_params = [shared.gradio[k] for k in ['output_textbox', 'markdown', 'html']]
            gen_events.append(shared.gradio['Generate'].click(generate_reply, shared.input_params, output_params, show_progress=shared.args.no_stream))
            gen_events.append(shared.gradio['textbox'].submit(generate_reply, shared.input_params, output_params, show_progress=shared.args.no_stream))
            gen_events.append(shared.gradio['Continue'].click(generate_reply, [shared.gradio['output_textbox']] + shared.input_params[1:], output_params, show_progress=shared.args.no_stream))
            shared.gradio['Stop'].click(stop_everything_event, [], [], queue=False, cancels=gen_events if shared.args.no_stream else None)
            shared.gradio['interface'].load(None, None, None, _js=f"() => {{{ui.main_js}}}")

        with gr.Tab("Model", elem_id="model-tab"):
            create_model_menus()

        with gr.Tab("Training", elem_id="training-tab"):
            training.create_train_interface()

        with gr.Tab("Interface mode", elem_id="interface-mode"):
            modes = ["default", "notebook", "chat", "cai_chat"]
            current_mode = "default"
            for mode in modes[1:]:
                if eval(f"shared.args.{mode}"):
                    current_mode = mode
                    break
            cmd_list = vars(shared.args)
            bool_list = [k for k in cmd_list if type(cmd_list[k]) is bool and k not in modes]
            bool_active = [k for k in bool_list if vars(shared.args)[k]]

            gr.Markdown("*Experimental*")
            shared.gradio['interface_modes_menu'] = gr.Dropdown(choices=modes, value=current_mode, label="Mode")
            shared.gradio['extensions_menu'] = gr.CheckboxGroup(choices=get_available_extensions(), value=shared.args.extensions, label="Available extensions")
            shared.gradio['bool_menu'] = gr.CheckboxGroup(choices=bool_list, value=bool_active, label="Boolean command-line flags")
            shared.gradio['reset_interface'] = gr.Button("Apply and restart the interface")
            shared.gradio['reset_interface'].click(set_interface_arguments, [shared.gradio[k] for k in ['interface_modes_menu', 'extensions_menu', 'bool_menu']], None)
            shared.gradio['reset_interface'].click(lambda: None, None, None, _js='() => {document.body.innerHTML=\'<h1 style="font-family:monospace;margin-top:20%;color:lightgray;text-align:center;">Reloading...</h1>\'; setTimeout(function(){location.reload()},2500); return []}')

        if shared.args.extensions is not None:
            extensions_module.create_extensions_block()
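        # Keep the 'generate_state' dict in sync with the individual widgets.
        # Checkbox/Number components update on .change; sliders update on
        # .release so the state is only written once the mouse is let go.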
        def change_dict_value(d, key, value):
            d[key] = value
            return d

        for k in ['do_sample', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping', 'max_new_tokens', 'seed', 'stop_at_newline', 'chat_prompt_size_slider', 'chat_generation_attempts']:
            if k not in shared.gradio:
                continue
            if type(shared.gradio[k]) in [gr.Checkbox, gr.Number]:
                shared.gradio[k].change(lambda state, value, copy=k: change_dict_value(state, copy, value), inputs=[shared.gradio['generate_state'], shared.gradio[k]], outputs=shared.gradio['generate_state'])
            else:
                shared.gradio[k].release(lambda state, value, copy=k: change_dict_value(state, copy, value), inputs=[shared.gradio['generate_state'], shared.gradio[k]], outputs=shared.gradio['generate_state'])

        if not shared.is_chat():
            api.create_apis()

    # Authentication
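    # The credentials file is parsed as comma-separated user:password pairs,
    # e.g. a line like "user1:password1,user2:password2"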
    auth = None
    if shared.args.gradio_auth_path is not None:
        gradio_auth_creds = []
        with open(shared.args.gradio_auth_path, 'r', encoding="utf8") as file:
            for line in file.readlines():
                gradio_auth_creds += [x.strip() for x in line.split(',') if x.strip()]
        auth = [tuple(cred.split(':')) for cred in gradio_auth_creds]

    # Launch the interface
    shared.gradio['interface'].queue()
    if shared.args.listen:
        shared.gradio['interface'].launch(prevent_thread_lock=True, share=shared.args.share, server_name='0.0.0.0', server_port=shared.args.listen_port, inbrowser=shared.args.auto_launch, auth=auth)
    else:
        shared.gradio['interface'].launch(prevent_thread_lock=True, share=shared.args.share, server_port=shared.args.listen_port, inbrowser=shared.args.auto_launch, auth=auth)

create_interface()

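# Keep the main thread alive and rebuild the interface whenever
# "Apply and restart the interface" sets shared.need_restart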
while True:
    time.sleep(0.5)
    if shared.need_restart:
        shared.need_restart = False
        shared.gradio['interface'].close()
        create_interface()