
RWKV support prototype

oobabooga 2 years ago
parent
commit
ebc64a408c
3 changed files with 42 additions and 1 deletion
  1. + 27 - 1    modules/models.py
  2. + 1 - 0     modules/shared.py
  3. + 14 - 0    modules/text_generation.py

+ 27 - 1
modules/models.py

@@ -38,8 +38,10 @@ def load_model(model_name):
     print(f"Loading {model_name}...")
     t0 = time.time()
 
+    shared.is_RWKV = model_name.lower().startswith('rwkv-')
+
     # Default settings
-    if not (shared.args.cpu or shared.args.load_in_8bit or shared.args.auto_devices or shared.args.disk or shared.args.gpu_memory is not None or shared.args.cpu_memory is not None or shared.args.deepspeed or shared.args.flexgen):
+    if not (shared.args.cpu or shared.args.load_in_8bit or shared.args.auto_devices or shared.args.disk or shared.args.gpu_memory is not None or shared.args.cpu_memory is not None or shared.args.deepspeed or shared.args.flexgen or shared.is_RWKV):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
         else:
@@ -75,6 +77,30 @@ def load_model(model_name):
         model.module.eval() # Inference
         print(f"DeepSpeed ZeRO-3 is enabled: {is_deepspeed_zero3_enabled()}")
 
+    # RWKV model (not on HuggingFace)
+    elif shared.is_RWKV:
+        import types
+        np.set_printoptions(precision=4, suppress=True, linewidth=200)
+
+        os.environ['RWKV_JIT_ON'] = '1'
+        os.environ["RWKV_CUDA_ON"] = '0' #  '1' : use CUDA kernel for seq mode (much faster)
+
+        from rwkv.model import RWKV
+        from rwkv.utils import PIPELINE, PIPELINE_ARGS
+
+        model = RWKV(model='models/RWKV-4-Pile-169M-20220807-8023.pth', strategy='cuda fp16')
+
+        out, state = model.forward([187, 510, 1563, 310, 247], None)   # use 20B_tokenizer.json
+        print(out.detach().cpu().numpy())                   # get logits
+        out, state = model.forward([187, 510], None)
+        out, state = model.forward([1563], state)           # RNN has state (use deepcopy if you want to clone it)
+        out, state = model.forward([310, 247], state)
+        print(out.detach().cpu().numpy())                   # same result as above
+
+        pipeline = PIPELINE(model, "20B_tokenizer.json")
+
+        return pipeline, None
+
     # Custom
     else:
         command = "AutoModelForCausalLM.from_pretrained"

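Note that for RWKV, load_model() returns (pipeline, None): shared.model becomes an rwkv PIPELINE object rather than a Hugging Face model, and shared.tokenizer stays None because the pipeline tokenizes internally with 20B_tokenizer.json. Below is a minimal sketch of using that pipeline on its own, following the calls in this diff; the prompt and sampling values are illustrative, and the checkpoint path is the one hardcoded in this prototype.

    import os
    os.environ['RWKV_JIT_ON'] = '1'
    os.environ['RWKV_CUDA_ON'] = '0'  # '1' enables the CUDA kernel for seq mode

    from rwkv.model import RWKV
    from rwkv.utils import PIPELINE, PIPELINE_ARGS

    # Checkpoint path and strategy match the values hardcoded in models.py above.
    model = RWKV(model='models/RWKV-4-Pile-169M-20220807-8023.pth', strategy='cuda fp16')
    pipeline = PIPELINE(model, "20B_tokenizer.json")

    # Sampling settings mirror the PIPELINE_ARGS used in text_generation.py below;
    # temperature and top_p here are example values.
    args = PIPELINE_ARGS(temperature=0.8, top_p=0.9,
                         alpha_frequency=0.25, alpha_presence=0.25,
                         token_ban=[0], token_stop=[])
    print(pipeline.generate("The quick brown fox", token_count=64, args=args, callback=None))
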
+ 1 - 0
modules/shared.py

@@ -5,6 +5,7 @@ tokenizer = None
 model_name = ""
 soft_prompt_tensor = None
 soft_prompt = False
+is_RWKV = False
 
 # Chat variables
 history = {'internal': [], 'visible': []}

+ 14 - 0
modules/text_generation.py

@@ -6,6 +6,7 @@ import numpy as np
 import torch
 import transformers
 from tqdm import tqdm
+from rwkv.utils import PIPELINE, PIPELINE_ARGS
 
 import modules.shared as shared
 from modules.extensions import apply_extensions
@@ -80,6 +81,19 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
     if not shared.args.cpu:
         torch.cuda.empty_cache()
 
+    if shared.is_RWKV:
+        def my_print(s):
+            print(s, end='', flush=True)
+        args = PIPELINE_ARGS(temperature = temperature, top_p = top_p,
+                             alpha_frequency = 0.25, # Frequency Penalty (as in GPT-3)
+                             alpha_presence = 0.25, # Presence Penalty (as in GPT-3)
+                             token_ban = [0], # ban the generation of some tokens
+                             token_stop = []) # stop generation whenever you see any token here
+        reply = question + shared.model.generate(question, token_count=max_new_tokens, args=args, callback=None)
+        print(formatted_outputs(reply, None))
+        yield formatted_outputs(reply, None)
+        return formatted_outputs(reply, None)
+
     original_question = question
     if not (shared.args.chat or shared.args.cai_chat):
         question = apply_extensions(question, "input")