script.py

import time
from pathlib import Path

import gradio as gr
import torch

from extensions.silero_tts import tts_preprocessor
from modules import chat, shared
from modules.html_generator import chat_html_wrapper

torch._C._jit_set_profiling_mode(False)
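# The call above turns off the TorchScript profiling executor. (Assumption: this is done to
# avoid profiling overhead/warnings when running the scripted Silero model on CPU.)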
params = {
    'activate': True,
    'speaker': 'en_56',
    'language': 'en',
    'model_id': 'v3_en',
    'sample_rate': 48000,
    'device': 'cpu',
    'show_text': False,
    'autoplay': True,
    'voice_pitch': 'medium',
    'voice_speed': 'medium',
}
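# Note on the defaults above: 'speaker' must be one of the voice IDs in voices_by_gender below,
# and 'voice_pitch'/'voice_speed' must be entries from voice_pitches/voice_speeds. (Assumption:
# the Silero v3 English model also expects a sample rate it supports, e.g. 8000, 24000 or 48000 Hz.)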
current_params = params.copy()
voices_by_gender = ['en_99', 'en_45', 'en_18', 'en_117', 'en_49', 'en_51', 'en_68', 'en_0', 'en_26', 'en_56', 'en_74', 'en_5', 'en_38', 'en_53', 'en_21', 'en_37', 'en_107', 'en_10', 'en_82', 'en_16', 'en_41', 'en_12', 'en_67', 'en_61', 'en_14', 'en_11', 'en_39', 'en_52', 'en_24', 'en_97', 'en_28', 'en_72', 'en_94', 'en_36', 'en_4', 'en_43', 'en_88', 'en_25', 'en_65', 'en_6', 'en_44', 'en_75', 'en_91', 'en_60', 'en_109', 'en_85', 'en_101', 'en_108', 'en_50', 'en_96', 'en_64', 'en_92', 'en_76', 'en_33', 'en_116', 'en_48', 'en_98', 'en_86', 'en_62', 'en_54', 'en_95', 'en_55', 'en_111', 'en_3', 'en_83', 'en_8', 'en_47', 'en_59', 'en_1', 'en_2', 'en_7', 'en_9', 'en_13', 'en_15', 'en_17', 'en_19', 'en_20', 'en_22', 'en_23', 'en_27', 'en_29', 'en_30', 'en_31', 'en_32', 'en_34', 'en_35', 'en_40', 'en_42', 'en_46', 'en_57', 'en_58', 'en_63', 'en_66', 'en_69', 'en_70', 'en_71', 'en_73', 'en_77', 'en_78', 'en_79', 'en_80', 'en_81', 'en_84', 'en_87', 'en_89', 'en_90', 'en_93', 'en_100', 'en_102', 'en_103', 'en_104', 'en_105', 'en_106', 'en_110', 'en_112', 'en_113', 'en_114', 'en_115']
voice_pitches = ['x-low', 'low', 'medium', 'high', 'x-high']
voice_speeds = ['x-slow', 'slow', 'medium', 'fast', 'x-fast']
streaming_state = shared.args.no_stream  # remember if chat streaming was enabled

# Used for making text xml compatible, needed for voice pitch and speed control
table = str.maketrans({
    "<": "&lt;",
    ">": "&gt;",
    "&": "&amp;",
    "'": "&apos;",
    '"': "&quot;",
})

def xmlesc(txt):
    return txt.translate(table)
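
# Illustrative example only: xmlesc('Tom & Jerry say "hi"') returns
# 'Tom &amp; Jerry say &quot;hi&quot;', which is safe to embed in the SSML built below.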

def load_model():
    model, example_text = torch.hub.load(repo_or_dir='snakers4/silero-models', model='silero_tts', language=params['language'], speaker=params['model_id'])
    model.to(params['device'])
    return model


model = load_model()
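# torch.hub.load fetches snakers4/silero-models into the local hub cache on first use and
# returns (model, example_text); only the model object is kept here.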

def remove_tts_from_history(name1, name2, mode):
    for i, entry in enumerate(shared.history['internal']):
        shared.history['visible'][i] = [shared.history['visible'][i][0], entry[1]]

    return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
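
# Note on the history layout used above and below: shared.history['internal'] holds the raw
# text replies, while shared.history['visible'] holds what is rendered in the UI (here, the
# <audio> HTML produced by output_modifier, optionally followed by the message text).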

def toggle_text_in_history(name1, name2, mode):
    for i, entry in enumerate(shared.history['visible']):
        visible_reply = entry[1]
        if visible_reply.startswith('<audio'):
            if params['show_text']:
                reply = shared.history['internal'][i][1]
                shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>\n\n{reply}"]
            else:
                shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>"]

    return chat_html_wrapper(shared.history['visible'], name1, name2, mode)

def input_modifier(string):
    """
    This function is applied to your text inputs before
    they are fed into the model.
    """

    # Remove autoplay from the last reply
    if shared.is_chat() and len(shared.history['internal']) > 0:
        shared.history['visible'][-1] = [shared.history['visible'][-1][0], shared.history['visible'][-1][1].replace('controls autoplay>', 'controls>')]

    shared.processing_message = "*Is recording a voice message...*"
    shared.args.no_stream = True  # Disable streaming, because otherwise the audio output would stutter and start over every time the message is updated
    return string

def output_modifier(string):
    """
    This function is applied to the model outputs.
    """

    global model, current_params, streaming_state

    # Reload the model if any parameter has been changed from the UI
    for i in params:
        if params[i] != current_params[i]:
            model = load_model()
            current_params = params.copy()
            break

    if not params['activate']:
        return string

    original_string = string
    string = tts_preprocessor.preprocess(string)

    if string == '':
        string = '*Empty reply, try regenerating*'
    else:
        output_file = Path(f'extensions/silero_tts/outputs/{shared.character}_{int(time.time())}.wav')
        prosody = '<prosody rate="{}" pitch="{}">'.format(params['voice_speed'], params['voice_pitch'])
        silero_input = f'<speak>{prosody}{xmlesc(string)}</prosody></speak>'
        model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file))

        autoplay = 'autoplay' if params['autoplay'] else ''
        string = f'<audio src="file/{output_file.as_posix()}" controls {autoplay}></audio>'
        if params['show_text']:
            string += f'\n\n{original_string}'

    shared.processing_message = "*Is typing...*"
    shared.args.no_stream = streaming_state  # restore the streaming option to its previous value
    return string
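
# For reference, with the default params the SSML handed to model.save_wav looks like:
#   <speak><prosody rate="medium" pitch="medium">Hello there.</prosody></speak>
# (illustrative example text, not taken from the script)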

def bot_prefix_modifier(string):
    """
    This function is only applied in chat mode. It modifies
    the prefix text for the Bot and can be used to bias its
    behavior.
    """

    return string

def ui():
    # Gradio elements
    with gr.Accordion("Silero TTS"):
        with gr.Row():
            activate = gr.Checkbox(value=params['activate'], label='Activate TTS')
            autoplay = gr.Checkbox(value=params['autoplay'], label='Play TTS automatically')

        show_text = gr.Checkbox(value=params['show_text'], label='Show message text under audio player')
        voice = gr.Dropdown(value=params['speaker'], choices=voices_by_gender, label='TTS voice')
        with gr.Row():
            v_pitch = gr.Dropdown(value=params['voice_pitch'], choices=voice_pitches, label='Voice pitch')
            v_speed = gr.Dropdown(value=params['voice_speed'], choices=voice_speeds, label='Voice speed')

        with gr.Row():
            convert = gr.Button('Permanently replace audios with the message texts')
            convert_cancel = gr.Button('Cancel', visible=False)
            convert_confirm = gr.Button('Confirm (cannot be undone)', variant="stop", visible=False)

    # Convert history with confirmation
    convert_arr = [convert_confirm, convert, convert_cancel]
    convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)
    convert_confirm.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)
    convert_confirm.click(remove_tts_from_history, [shared.gradio[k] for k in ['name1', 'name2', 'Chat mode']], shared.gradio['display'])
    convert_confirm.click(lambda: chat.save_history(timestamp=False), [], [], show_progress=False)
    convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)

    # Toggle message text in history
    show_text.change(lambda x: params.update({"show_text": x}), show_text, None)
    show_text.change(toggle_text_in_history, [shared.gradio[k] for k in ['name1', 'name2', 'Chat mode']], shared.gradio['display'])
    show_text.change(lambda: chat.save_history(timestamp=False), [], [], show_progress=False)

    # Event functions to update the parameters in the backend
    activate.change(lambda x: params.update({"activate": x}), activate, None)
    autoplay.change(lambda x: params.update({"autoplay": x}), autoplay, None)
    voice.change(lambda x: params.update({"speaker": x}), voice, None)
    v_pitch.change(lambda x: params.update({"voice_pitch": x}), v_pitch, None)
    v_speed.change(lambda x: params.update({"voice_speed": x}), v_speed, None)
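
# The .change/.click handlers above only mutate the module-level params dict; the next call to
# output_modifier picks up the new values and, if anything differs from current_params,
# reloads the Silero model via load_model().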