script.py

import re
import time
from pathlib import Path

import gradio as gr
import torch

import modules.chat as chat
import modules.shared as shared

torch._C._jit_set_profiling_mode(False)

params = {
    'activate': True,
    'speaker': 'en_56',
    'language': 'en',
    'model_id': 'v3_en',
    'sample_rate': 48000,
    'device': 'cpu',
    'show_text': False,
    'autoplay': True,
    'voice_pitch': 'medium',
    'voice_speed': 'medium',
}

current_params = params.copy()
voices_by_gender = ['en_99', 'en_45', 'en_18', 'en_117', 'en_49', 'en_51', 'en_68', 'en_0', 'en_26', 'en_56', 'en_74', 'en_5', 'en_38', 'en_53', 'en_21', 'en_37', 'en_107', 'en_10', 'en_82', 'en_16', 'en_41', 'en_12', 'en_67', 'en_61', 'en_14', 'en_11', 'en_39', 'en_52', 'en_24', 'en_97', 'en_28', 'en_72', 'en_94', 'en_36', 'en_4', 'en_43', 'en_88', 'en_25', 'en_65', 'en_6', 'en_44', 'en_75', 'en_91', 'en_60', 'en_109', 'en_85', 'en_101', 'en_108', 'en_50', 'en_96', 'en_64', 'en_92', 'en_76', 'en_33', 'en_116', 'en_48', 'en_98', 'en_86', 'en_62', 'en_54', 'en_95', 'en_55', 'en_111', 'en_3', 'en_83', 'en_8', 'en_47', 'en_59', 'en_1', 'en_2', 'en_7', 'en_9', 'en_13', 'en_15', 'en_17', 'en_19', 'en_20', 'en_22', 'en_23', 'en_27', 'en_29', 'en_30', 'en_31', 'en_32', 'en_34', 'en_35', 'en_40', 'en_42', 'en_46', 'en_57', 'en_58', 'en_63', 'en_66', 'en_69', 'en_70', 'en_71', 'en_73', 'en_77', 'en_78', 'en_79', 'en_80', 'en_81', 'en_84', 'en_87', 'en_89', 'en_90', 'en_93', 'en_100', 'en_102', 'en_103', 'en_104', 'en_105', 'en_106', 'en_110', 'en_112', 'en_113', 'en_114', 'en_115']
voice_pitches = ['x-low', 'low', 'medium', 'high', 'x-high']
voice_speeds = ['x-slow', 'slow', 'medium', 'fast', 'x-fast']

# Used for making text xml compatible, needed for voice pitch and speed control
table = str.maketrans({
    "<": "&lt;",
    ">": "&gt;",
    "&": "&amp;",
    "'": "&apos;",
    '"': "&quot;",
})


def xmlesc(txt):
    return txt.translate(table)


def load_model():
    model, example_text = torch.hub.load(repo_or_dir='snakers4/silero-models', model='silero_tts', language=params['language'], speaker=params['model_id'])
    model.to(params['device'])
    return model


model = load_model()


def remove_surrounded_chars(string):
    # regexp is way faster than repeated string concatenation!
    # this expression matches to 'as few symbols as possible (0 upwards) between any asterisks' OR
    # 'as few symbols as possible (0 upwards) between an asterisk and the end of the string'
    return re.sub(r'\*[^\*]*?(\*|$)', '', string)
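

# Replaces every generated <audio> element in the visible history with the
# original message text stored in the internal history.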
def remove_tts_from_history(name1, name2):
    for i, entry in enumerate(shared.history['internal']):
        shared.history['visible'][i] = [shared.history['visible'][i][0], entry[1]]

    return chat.generate_chat_output(shared.history['visible'], name1, name2, shared.character)
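

# Shows or hides the plain message text underneath each <audio> element in the
# visible history, according to params['show_text'].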
def toggle_text_in_history(name1, name2):
    for i, entry in enumerate(shared.history['visible']):
        visible_reply = entry[1]
        if visible_reply.startswith('<audio'):
            if params['show_text']:
                reply = shared.history['internal'][i][1]
                shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>\n\n{reply}"]
            else:
                shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>"]

    return chat.generate_chat_output(shared.history['visible'], name1, name2, shared.character)
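

# Note: input_modifier strips 'autoplay' from the last visible reply so the
# previous audio message does not play again when the chat display is refreshed.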
def input_modifier(string):
    """
    This function is applied to your text inputs before
    they are fed into the model.
    """

    # Remove autoplay from the last reply
    if (shared.args.chat or shared.args.cai_chat) and len(shared.history['internal']) > 0:
        shared.history['visible'][-1] = [shared.history['visible'][-1][0], shared.history['visible'][-1][1].replace('controls autoplay>', 'controls>')]

    shared.processing_message = "*Is recording a voice message...*"
    return string


def output_modifier(string):
    """
    This function is applied to the model outputs.
    """

    global model, current_params

    # Reload the model if any parameter has changed since the last call
    for i in params:
        if params[i] != current_params[i]:
            model = load_model()
            current_params = params.copy()
            break

    if not params['activate']:
        return string

    original_string = string
    string = remove_surrounded_chars(string)
    string = string.replace('"', '')
    string = string.replace('“', '')
    string = string.replace('\n', ' ')
    string = string.strip()

    if string == '':
        string = '*Empty reply, try regenerating*'
    else:
        output_file = Path(f'extensions/silero_tts/outputs/{shared.character}_{int(time.time())}.wav')
        prosody = '<prosody rate="{}" pitch="{}">'.format(params['voice_speed'], params['voice_pitch'])
        silero_input = f'<speak>{prosody}{xmlesc(string)}</prosody></speak>'
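        # With the default params above, the SSML sent to Silero looks like:
        # <speak><prosody rate="medium" pitch="medium">escaped reply text</prosody></speak>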
        model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file))

        autoplay = 'autoplay' if params['autoplay'] else ''
        string = f'<audio src="file/{output_file.as_posix()}" controls {autoplay}></audio>'
        if params['show_text']:
            string += f'\n\n{original_string}'

    shared.processing_message = "*Is typing...*"
    return string


def bot_prefix_modifier(string):
    """
    This function is only applied in chat mode. It modifies
    the prefix text for the Bot and can be used to bias its
    behavior.
    """

    return string
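

# Builds the extension's Gradio controls and wires them back to the params dict;
# the convert and show_text handlers also rewrite and re-save the chat history.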
def ui():
    # Gradio elements
    with gr.Accordion("Silero TTS"):
        with gr.Row():
            activate = gr.Checkbox(value=params['activate'], label='Activate TTS')
            autoplay = gr.Checkbox(value=params['autoplay'], label='Play TTS automatically')

        show_text = gr.Checkbox(value=params['show_text'], label='Show message text under audio player')
        voice = gr.Dropdown(value=params['speaker'], choices=voices_by_gender, label='TTS voice')
        with gr.Row():
            v_pitch = gr.Dropdown(value=params['voice_pitch'], choices=voice_pitches, label='Voice pitch')
            v_speed = gr.Dropdown(value=params['voice_speed'], choices=voice_speeds, label='Voice speed')

        with gr.Row():
            convert = gr.Button('Permanently replace audios with the message texts')
            convert_cancel = gr.Button('Cancel', visible=False)
            convert_confirm = gr.Button('Confirm (cannot be undone)', variant="stop", visible=False)

    # Convert history with confirmation
    convert_arr = [convert_confirm, convert, convert_cancel]
    convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)
    convert_confirm.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)
    convert_confirm.click(remove_tts_from_history, [shared.gradio['name1'], shared.gradio['name2']], shared.gradio['display'])
    convert_confirm.click(lambda: chat.save_history(timestamp=False), [], [], show_progress=False)
    convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)

    # Toggle message text in history
    show_text.change(lambda x: params.update({"show_text": x}), show_text, None)
    show_text.change(toggle_text_in_history, [shared.gradio['name1'], shared.gradio['name2']], shared.gradio['display'])
    show_text.change(lambda: chat.save_history(timestamp=False), [], [], show_progress=False)

    # Event functions to update the parameters in the backend
    activate.change(lambda x: params.update({"activate": x}), activate, None)
    autoplay.change(lambda x: params.update({"autoplay": x}), autoplay, None)
    voice.change(lambda x: params.update({"speaker": x}), voice, None)
    v_pitch.change(lambda x: params.update({"voice_pitch": x}), v_pitch, None)
    v_speed.change(lambda x: params.update({"voice_speed": x}), v_speed, None)