text_generation.py

import gc
import re
import time

import numpy as np
import torch
import transformers
from tqdm import tqdm

import modules.shared as shared
from modules.extensions import apply_extensions
from modules.html_generator import generate_4chan_html, generate_basic_html
from modules.models import local_rank
from modules.stopping_criteria import _SentinelTokenStoppingCriteria
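
# Maximum number of prompt tokens that still leaves room in the 2048-token
# context window for the requested reply (and for the soft prompt, if loaded).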
def get_max_prompt_length(tokens):
    max_length = 2048 - tokens
    if shared.soft_prompt:
        max_length -= shared.soft_prompt_tensor.shape[1]
    return max_length
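
# Tokenize the prompt, truncating it so that prompt + reply fit in the context
# window, then move the ids to the device the current backend expects.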
def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
    input_ids = shared.tokenizer.encode(str(prompt), return_tensors='pt', truncation=True, max_length=get_max_prompt_length(tokens_to_generate), add_special_tokens=add_special_tokens)
    if shared.args.cpu or shared.args.flexgen:
        return input_ids
    elif shared.args.deepspeed:
        return input_ids.to(device=local_rank)
    else:
        return input_ids.cuda()
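
# Convert generated token ids back into text, dropping special tokens.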
def decode(output_ids):
    reply = shared.tokenizer.decode(output_ids, skip_special_tokens=True)
    reply = reply.replace(r'<|endoftext|>', '')
    return reply
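
# Embed the prompt tokens, prepend the soft prompt embeddings, and build a
# dummy input_ids tensor of matching length for generate() to consume.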
def generate_softprompt_input_tensors(input_ids):
    inputs_embeds = shared.model.transformer.wte(input_ids)
    inputs_embeds = torch.cat((shared.soft_prompt_tensor, inputs_embeds), dim=1)
    filler_input_ids = torch.zeros((1, inputs_embeds.shape[1]), dtype=input_ids.dtype).to(shared.model.device)
    # filler_input_ids += shared.model.config.bos_token_id # setting dummy input_ids to bos tokens
    return inputs_embeds, filler_input_ids

# Removes empty replies from gpt4chan outputs
def fix_gpt4chan(s):
    for i in range(10):
        s = re.sub("--- [0-9]*\n>>[0-9]*\n---", "---", s)
        s = re.sub("--- [0-9]*\n *\n---", "---", s)
        s = re.sub("--- [0-9]*\n\n\n---", "---", s)
    return s

# Fix the LaTeX equations in galactica
def fix_galactica(s):
    s = s.replace(r'\[', r'$')
    s = s.replace(r'\]', r'$')
    s = s.replace(r'\(', r'$')
    s = s.replace(r'\)', r'$')
    s = s.replace(r'$$', r'$')
    s = re.sub(r'\n', r'\n\n', s)
    s = re.sub(r"\n{3,}", "\n\n", s)
    return s
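
# Apply model-specific cleanup and package the reply for the UI: a single
# string in chat mode, otherwise one value per output component
# (plain text, GALACTICA view, HTML).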
def formatted_outputs(reply, model_name):
    if not (shared.args.chat or shared.args.cai_chat):
        if shared.model_name.lower().startswith('galactica'):
            reply = fix_galactica(reply)
            return reply, reply, generate_basic_html(reply)
        elif shared.model_name.lower().startswith(('gpt4chan', 'gpt-4chan', '4chan')):
            reply = fix_gpt4chan(reply)
            return reply, 'Only applicable for GALACTICA models.', generate_4chan_html(reply)
        else:
            return reply, 'Only applicable for GALACTICA models.', generate_basic_html(reply)
    else:
        return reply
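
# Main entry point: builds the generate() call for the selected backend and
# yields the reply either all at once (shared.args.no_stream) or in 8-token chunks.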
def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, eos_token=None, stopping_string=None):
    gc.collect()
    if not shared.args.cpu:
        torch.cuda.empty_cache()

    original_question = question
    if not (shared.args.chat or shared.args.cai_chat):
        question = apply_extensions(question, "input")
    if shared.args.verbose:
        print(f"\n\n{question}\n--------------------\n")

    input_ids = encode(question, max_new_tokens)
    cuda = "" if (shared.args.cpu or shared.args.deepspeed or shared.args.flexgen) else ".cuda()"
    if not shared.args.flexgen:
        n = shared.tokenizer.eos_token_id if eos_token is None else shared.tokenizer.encode(eos_token, return_tensors='pt')[0][-1]
    else:
        n = shared.tokenizer(eos_token).input_ids[0] if eos_token else None
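
    # Optionally stop generation as soon as a user-supplied string is produced.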
    if stopping_string is not None:
        # The stopping_criteria code below was copied from
        # https://github.com/PygmalionAI/gradio-ui/blob/master/src/model.py
        t = encode(stopping_string, 0, add_special_tokens=False)
        stopping_criteria_list = transformers.StoppingCriteriaList([
            _SentinelTokenStoppingCriteria(
                sentinel_token_ids=t,
                starting_idx=len(input_ids[0])
            )
        ])
    else:
        stopping_criteria_list = None
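
    # The generate() arguments are assembled as strings and eval'd below, so that
    # options a given backend does not support can simply be left out of the list.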
    if not shared.args.flexgen:
        generate_params = [
            f"eos_token_id={n}",
            f"stopping_criteria=stopping_criteria_list",
            f"do_sample={do_sample}",
            f"temperature={temperature}",
            f"top_p={top_p}",
            f"typical_p={typical_p}",
            f"repetition_penalty={repetition_penalty}",
            f"top_k={top_k}",
            f"min_length={min_length if shared.args.no_stream else 0}",
            f"no_repeat_ngram_size={no_repeat_ngram_size}",
            f"num_beams={num_beams}",
            f"penalty_alpha={penalty_alpha}",
            f"length_penalty={length_penalty}",
            f"early_stopping={early_stopping}",
        ]
    else:
        generate_params = [
            f"do_sample={do_sample}",
            f"temperature={temperature}",
            f"stop={n}",
        ]
    if shared.args.deepspeed:
        generate_params.append("synced_gpus=True")
    if shared.args.no_stream:
        generate_params.append("max_new_tokens=max_new_tokens")
    else:
        generate_params.append("max_new_tokens=8")
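
    # With a soft prompt, pass embeddings directly and feed dummy input_ids;
    # otherwise pass the prompt token ids as usual.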
    if shared.soft_prompt:
        inputs_embeds, filler_input_ids = generate_softprompt_input_tensors(input_ids)
        generate_params.insert(0, "inputs_embeds=inputs_embeds")
        generate_params.insert(0, "filler_input_ids")
    else:
        generate_params.insert(0, "input_ids")

    # Generate the entire reply at once
    if shared.args.no_stream:
        t0 = time.time()
        with torch.no_grad():
            output = eval(f"shared.model.generate({', '.join(generate_params)}){cuda}")[0]
        if shared.soft_prompt:
            output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))

        reply = decode(output)
        if not (shared.args.chat or shared.args.cai_chat):
            reply = original_question + apply_extensions(reply[len(question):], "output")
        yield formatted_outputs(reply, shared.model_name)

        t1 = time.time()
        print(f"Output generated in {(t1-t0):.2f} seconds ({(len(output)-len(input_ids[0]))/(t1-t0)/8:.2f} it/s, {len(output)-len(input_ids[0])} tokens)")

    # Generate the reply 8 tokens at a time
    else:
        yield formatted_outputs(original_question, shared.model_name)
        for i in tqdm(range(max_new_tokens//8+1)):
            with torch.no_grad():
                output = eval(f"shared.model.generate({', '.join(generate_params)}){cuda}")[0]
            if shared.soft_prompt:
                output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))

            reply = decode(output)
            if not (shared.args.chat or shared.args.cai_chat):
                reply = original_question + apply_extensions(reply[len(question):], "output")
            yield formatted_outputs(reply, shared.model_name)
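
            # Feed everything generated so far back in as the next prompt, so the
            # model keeps extending the same reply on the following iteration.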
            if not shared.args.flexgen:
                input_ids = torch.reshape(output, (1, output.shape[0]))
            else:
                input_ids = np.reshape(output, (1, output.shape[0]))
            if shared.soft_prompt:
                inputs_embeds, filler_input_ids = generate_softprompt_input_tensors(input_ids)

            if output[-1] == n:
                break