# modules/text_generation.py

import re
import time

import numpy as np
import torch
import transformers
from tqdm import tqdm

import modules.shared as shared
from modules.extensions import apply_extensions
from modules.html_generator import generate_4chan_html, generate_basic_html
from modules.models import local_rank
from modules.stopping_criteria import _SentinelTokenStoppingCriteria


def get_max_prompt_length(tokens):
    max_length = 2048 - tokens
    if shared.soft_prompt:
        max_length -= shared.soft_prompt_tensor.shape[1]
    return max_length
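
# Illustration (hypothetical numbers): with the 2048-token context window assumed
# above and no soft prompt loaded, reserving 200 tokens for generation leaves
# get_max_prompt_length(200) == 1848 tokens for the prompt itself.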
  17. def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
  18. input_ids = shared.tokenizer.encode(str(prompt), return_tensors='pt', truncation=True, max_length=get_max_prompt_length(tokens_to_generate), add_special_tokens=add_special_tokens)
  19. if shared.args.cpu or shared.args.flexgen:
  20. return input_ids
  21. elif shared.args.deepspeed:
  22. return input_ids.to(device=local_rank)
  23. else:
  24. return input_ids.cuda()
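
# Usage sketch (assumes a model and tokenizer are already loaded into shared):
#     input_ids = encode("Hello there", tokens_to_generate=200)
# returns a (1, seq_len) tensor of token ids, truncated so that the prompt plus
# 200 new tokens fit the context window, and placed on CPU, the DeepSpeed
# device, or CUDA depending on the command-line flags.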


def decode(output_ids):
    reply = shared.tokenizer.decode(output_ids, skip_special_tokens=True)
    reply = reply.replace(r'<|endoftext|>', '')
    return reply


def generate_softprompt_input_tensors(input_ids):
    inputs_embeds = shared.model.transformer.wte(input_ids)
    inputs_embeds = torch.cat((shared.soft_prompt_tensor, inputs_embeds), dim=1)
    filler_input_ids = torch.zeros((1, inputs_embeds.shape[1]), dtype=input_ids.dtype).to(shared.model.device)
    filler_input_ids += shared.model.config.bos_token_id  # setting dummy input_ids to bos tokens
    return inputs_embeds, filler_input_ids
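
# Note: the soft prompt is prepended in embedding space, so the real content is
# carried by inputs_embeds; filler_input_ids is a BOS-filled dummy of matching
# length that satisfies generate()'s input_ids argument without adding content.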


# Removes empty replies from gpt4chan outputs
def fix_gpt4chan(s):
    for i in range(10):
        s = re.sub("--- [0-9]*\n>>[0-9]*\n---", "---", s)
        s = re.sub("--- [0-9]*\n *\n---", "---", s)
        s = re.sub("--- [0-9]*\n\n\n---", "---", s)
    return s
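
# Illustrative example: a block such as "--- 123\n>>456\n---" (a post containing
# only a quote link) collapses to "---". The substitutions are looped because
# consecutive empty posts overlap on the shared "---" separator, so a single
# re.sub pass cannot remove them all.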


# Fix the LaTeX equations in galactica
def fix_galactica(s):
    s = s.replace(r'\[', r'$')
    s = s.replace(r'\]', r'$')
    s = s.replace(r'\(', r'$')
    s = s.replace(r'\)', r'$')
    s = s.replace(r'$$', r'$')
    s = re.sub(r'\n', r'\n\n', s)
    s = re.sub(r"\n{3,}", "\n\n", s)
    return s
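
# Illustrative example: GALACTICA emits LaTeX delimiters such as
# "\[ E = mc^2 \]", which becomes "$ E = mc^2 $" (presumably for the web UI's
# Markdown math rendering). The newline doubling turns single line breaks into
# paragraph breaks, and runs of 3+ newlines are then collapsed back to two.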


def formatted_outputs(reply, model_name):
    if not (shared.args.chat or shared.args.cai_chat):
        if model_name.lower().startswith('galactica'):
            reply = fix_galactica(reply)
            return reply, reply, generate_basic_html(reply)
        elif model_name.lower().startswith(('gpt4chan', 'gpt-4chan', '4chan')):
            reply = fix_gpt4chan(reply)
            return reply, 'Only applicable for GALACTICA models.', generate_4chan_html(reply)
        else:
            return reply, 'Only applicable for GALACTICA models.', generate_basic_html(reply)
    else:
        return reply
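
# Note: the non-chat branches return a 3-tuple (raw reply, a text slot that is
# only meaningful for GALACTICA, rendered HTML), presumably one value per UI
# output component; in chat mode the bare string is returned instead.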


def generate_reply(question, tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, eos_token=None, stopping_string=None):
    original_question = question
    if not (shared.args.chat or shared.args.cai_chat):
        question = apply_extensions(question, "input")
    if shared.args.verbose:
        print(f"\n\n{question}\n--------------------\n")

    input_ids = encode(question, tokens)
    cuda = "" if (shared.args.cpu or shared.args.deepspeed or shared.args.flexgen) else ".cuda()"
    if not shared.args.flexgen:
        n = shared.tokenizer.eos_token_id if eos_token is None else shared.tokenizer.encode(eos_token, return_tensors='pt')[0][-1]
    else:
        n = shared.tokenizer(eos_token).input_ids[0] if eos_token else None

    if stopping_string is not None:
        # The stopping_criteria code below was copied from
        # https://github.com/PygmalionAI/gradio-ui/blob/master/src/model.py
        t = encode(stopping_string, 0, add_special_tokens=False)
        stopping_criteria_list = transformers.StoppingCriteriaList([
            _SentinelTokenStoppingCriteria(
                sentinel_token_ids=t,
                starting_idx=len(input_ids[0])
            )
        ])
    else:
        stopping_criteria_list = None
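
    # Below, keyword arguments are accumulated as source-code *strings* and
    # later spliced into an eval'd shared.model.generate(...) call, so that the
    # HF and FlexGen backends (which accept different keyword sets) can share
    # one call site.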
    if not shared.args.flexgen:
        generate_params = [
            f"eos_token_id={n}",
            "stopping_criteria=stopping_criteria_list",
            f"do_sample={do_sample}",
            f"temperature={temperature}",
            f"top_p={top_p}",
            f"typical_p={typical_p}",
            f"repetition_penalty={repetition_penalty}",
            f"top_k={top_k}",
            f"min_length={min_length if shared.args.no_stream else 0}",
            f"no_repeat_ngram_size={no_repeat_ngram_size}",
            f"num_beams={num_beams}",
            f"penalty_alpha={penalty_alpha}",
            f"length_penalty={length_penalty}",
            f"early_stopping={early_stopping}",
        ]
    else:
        generate_params = [
            f"do_sample={do_sample}",
            f"temperature={temperature}",
            f"stop={n}",
        ]
    if shared.args.deepspeed:
        generate_params.append("synced_gpus=True")
    if shared.args.no_stream:
        generate_params.append("max_new_tokens=tokens")
    else:
        generate_params.append("max_new_tokens=8")

    if shared.soft_prompt:
        inputs_embeds, filler_input_ids = generate_softprompt_input_tensors(input_ids)
        generate_params.insert(0, "inputs_embeds=inputs_embeds")
        generate_params.insert(0, "filler_input_ids")
    else:
        generate_params.insert(0, "input_ids")

    # Generate the entire reply at once
    if shared.args.no_stream:
        t0 = time.time()
        with torch.no_grad():
            output = eval(f"shared.model.generate({', '.join(generate_params)}){cuda}")[0]
        if shared.soft_prompt:
            output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))

        reply = decode(output)
        if not (shared.args.chat or shared.args.cai_chat):
            reply = original_question + apply_extensions(reply[len(question):], "output")
        yield formatted_outputs(reply, shared.model_name)

        t1 = time.time()
        print(f"Output generated in {(t1-t0):.2f} seconds ({(len(output)-len(input_ids[0]))/(t1-t0)/8:.2f} it/s, {len(output)-len(input_ids[0])} tokens)")

    # Generate the reply 8 tokens at a time
    else:
        yield formatted_outputs(original_question, shared.model_name)
        for i in tqdm(range(tokens//8+1)):
            with torch.no_grad():
                output = eval(f"shared.model.generate({', '.join(generate_params)}){cuda}")[0]
            if shared.soft_prompt:
                output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))

            reply = decode(output)
            if not (shared.args.chat or shared.args.cai_chat):
                reply = original_question + apply_extensions(reply[len(question):], "output")
            yield formatted_outputs(reply, shared.model_name)

            if not shared.args.flexgen:
                input_ids = torch.reshape(output, (1, output.shape[0]))
            else:
                input_ids = np.reshape(output, (1, output.shape[0]))
            if shared.soft_prompt:
                inputs_embeds, filler_input_ids = generate_softprompt_input_tensors(input_ids)

            if output[-1] == n:
                break
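

# Usage sketch (hypothetical argument values; generate_reply is a generator, so
# callers iterate it: in streaming mode each yield is a longer partial reply,
# while with --no-stream there is a single final yield):
#
#     for reply in generate_reply("Tell me a story.", tokens=200, do_sample=True,
#                                 max_new_tokens=200, temperature=0.7, top_p=0.9,
#                                 typical_p=1.0, repetition_penalty=1.18, top_k=40,
#                                 min_length=0, no_repeat_ngram_size=0, num_beams=1,
#                                 penalty_alpha=0, length_penalty=1, early_stopping=False):
#         last = reply  # after the loop, `last` holds the finished output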