tts_preprocessor.py

import re

from num2words import num2words

alphabet_map = {
    "A": " Ei ",
    "B": " Bee ",
    "C": " See ",
    "D": " Dee ",
    "E": " Ii ",
    "F": " Eff ",
    "G": " Jee ",
    "H": " Eich ",
    "I": " Eye ",
    "J": " Jay ",
    "K": " Kay ",
    "L": " El ",
    "M": " Emm ",
    "N": " Enn ",
    "O": " Ohh ",
    "P": " Pii ",
    "Q": " Queue ",
    "R": " Are ",
    "S": " Ess ",
    "T": " Tee ",
    "U": " You ",
    "V": " Vii ",
    "W": " Double You ",
    "X": " Ex ",
    "Y": " Why ",
    "Z": " Zed "  # Zed is weird, as I (da3dsoul) am American, but most of the voice models sound British, so it matches
}


def preprocess(string):
    string = remove_surrounded_chars(string)
    string = string.replace('"', '')
    string = string.replace('“', '').replace('”', '')
    string = string.replace('\n', ' ')
    string = remove_commas(string)
    string = replace_roman(string)
    string = hyphen_range_to(string)
    string = num_to_words(string)

    # TODO: try to use an ML predictor to expand abbreviations. It's hard and context-dependent, and whether to
    # actually say the abbreviation as a word or to spell it out (as done below) is not agreed upon.
    # For now, expand abbreviations to letter-by-letter pronunciations.
    string = replace_abbreviations(string)

    string = string.strip()
    return string
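
# Illustrative example (not from the original file): run end to end, the pipeline above
# should turn 'Chapter IV covers pages 10-15.' into 'Chapter four covers pages ten to fifteen.'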


def remove_surrounded_chars(string):
    # this expression matches 'as few symbols as possible (0 upwards) between any asterisks' OR
    # 'as few symbols as possible (0 upwards) between an asterisk and the end of the string'
    return re.sub(r'\*[^*]*?(\*|$)', '', string)
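
# Illustrative example:
#   remove_surrounded_chars('hello *waves* there') should give 'hello  there'
#   (the asterisk-wrapped action is dropped; a doubled space is left behind)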


def replace_roman(string):
    pattern = re.compile(r'\s[IVXLCDM]+[\s,.?!)"\'\]>]')
    result = string
    while True:
        match = pattern.search(result)
        if match is None:
            break
        start = match.start()
        end = match.end()
        # keep the leading whitespace and the trailing delimiter; convert only the numeral between them
        result = result[0:start + 1] + str(roman_to_int(result[start + 1:end - 1])) + result[end - 1:]
    return result
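
# Illustrative example:
#   replace_roman('in Act III of the play') should give 'in Act 3 of the play'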


def roman_to_int(s):
    rom_val = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    int_val = 0
    for i in range(len(s)):
        if i > 0 and rom_val[s[i]] > rom_val[s[i - 1]]:
            # subtractive notation (e.g. IV, IX): undo the previous addition and subtract instead
            int_val += rom_val[s[i]] - 2 * rom_val[s[i - 1]]
        else:
            int_val += rom_val[s[i]]
    return int_val
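
# Illustrative examples:
#   roman_to_int('XIV') should return 14, and roman_to_int('MMXXIV') should return 2024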


def hyphen_range_to(text):
    # read numeric ranges such as "10-15" (or "10–15") as "10 to 15"
    pattern = re.compile(r'(\d+)[-–](\d+)')
    result = pattern.sub(lambda x: x.group(1) + ' to ' + x.group(2), text)
    return result
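
# Illustrative example:
#   hyphen_range_to('pages 10-15') should give 'pages 10 to 15'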


def num_to_words(text):
    # spell out every remaining integer with num2words
    pattern = re.compile(r'\d+')
    result = pattern.sub(lambda x: num2words(int(x.group())), text)
    return result
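
# Illustrative example:
#   num_to_words('3 little pigs') should give 'three little pigs'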


def replace_abbreviations(string):
    # a run of 2-4 capital letters surrounded by whitespace, brackets or punctuation is treated as an abbreviation
    pattern = re.compile(r'[\s("\'\[<][A-Z]{2,4}[\s,.?!)"\'\]>]')
    result = string
    while True:
        match = pattern.search(result)
        if match is None:
            break
        start = match.start()
        end = match.end()
        result = result[0:start] + replace_abbreviation(result[start:end]) + result[end:]
    return result
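
# Illustrative example:
#   replace_abbreviations('the FBI agent') should give 'the  Eff  Bee  Eye  agent'
#   (the padded entries in alphabet_map leave doubled spaces behind)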


def replace_abbreviation(string):
    result = ""
    for char in string:
        result = match_mapping(char, result)
    return result


def match_mapping(char, result):
    for mapping in alphabet_map.keys():
        if char == mapping:
            return result + alphabet_map[char]
    return result + char


def remove_commas(text):
    # strip thousands separators so num2words sees one number, e.g. "1,000" -> "1000"
    pattern = re.compile(r'(\d),(\d)')
    result = pattern.sub(r'\1\2', text)
    return result
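
# Illustrative example:
#   remove_commas('1,234,567 items') should give '1234567 items'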


def __main__(args):
    print(preprocess(args[1]))


if __name__ == "__main__":
    import sys

    __main__(sys.argv)
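
# Illustrative command-line usage:
#   python tts_preprocessor.py "Henry VIII had 6 wives"
#   should print: Henry eight had six wives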