
Move RWKV loader into a separate file

oobabooga, 2 years ago
commit 70e522732c
3 changed files with 29 additions and 24 deletions
  1. modules/RWKV.py (+26, -0)
  2. modules/models.py (+2, -20)
  3. modules/text_generation.py (+1, -4)

+ 26 - 0
modules/RWKV.py

@@ -0,0 +1,26 @@
+import os, time, types, torch
+from pathlib import Path
+import numpy as np
+np.set_printoptions(precision=4, suppress=True, linewidth=200)
+
+os.environ['RWKV_JIT_ON'] = '1'
+os.environ["RWKV_CUDA_ON"] = '0' #  '1' : use CUDA kernel for seq mode (much faster)
+
+import repositories.ChatRWKV.v2.rwkv as rwkv
+from rwkv.model import RWKV
+from rwkv.utils import PIPELINE, PIPELINE_ARGS
+
+def load_RWKV_model(path):
+    os.system("ls")
+    model = RWKV(model=path.as_posix(), strategy='cuda fp16')
+
+    out, state = model.forward([187, 510, 1563, 310, 247], None)   # use 20B_tokenizer.json
+    print(out.detach().cpu().numpy())                   # get logits
+    out, state = model.forward([187, 510], None)
+    out, state = model.forward([1563], state)           # RNN has state (use deepcopy if you want to clone it)
+    out, state = model.forward([310, 247], state)
+    print(out.detach().cpu().numpy())                   # same result as above
+
+    pipeline = PIPELINE(model, Path("repositories/ChatRWKV/20B_tokenizer.json").as_posix())
+
+    return pipeline
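
For context (not part of this commit): a minimal usage sketch of the new module, assuming the rwkv package's PIPELINE_ARGS and pipeline.generate() API that modules/text_generation.py relies on below. The model path matches the one hard-coded in modules/models.py; the prompt, token count, and sampling values are illustrative only.

    from pathlib import Path

    from modules.RWKV import load_RWKV_model
    from rwkv.utils import PIPELINE_ARGS

    # Load the model pipeline the same way modules/models.py now does.
    pipeline = load_RWKV_model(Path('models/RWKV-4-Pile-169M-20220807-8023.pth'))

    # Sampling settings mirroring modules/text_generation.py (values illustrative).
    args = PIPELINE_ARGS(temperature=0.7, top_p=0.9,
                         alpha_frequency=0.25,  # frequency penalty (as in GPT-3)
                         alpha_presence=0.25,   # presence penalty (as in GPT-3)
                         token_ban=[0],         # ban the generation of token 0
                         token_stop=[])         # no explicit stop tokens

    prompt = "Here is a short story:"
    reply = prompt + pipeline.generate(prompt, token_count=200, args=args, callback=None)
    print(reply)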

+ 2 - 20
modules/models.py

@@ -79,27 +79,9 @@ def load_model(model_name):
 
     # RMKV model (not on HuggingFace)
     elif shared.is_RWKV:
-        import types
-        np.set_printoptions(precision=4, suppress=True, linewidth=200)
+        from modules.RWKV import load_RWKV_model
 
-        os.environ['RWKV_JIT_ON'] = '1'
-        os.environ["RWKV_CUDA_ON"] = '0' #  '1' : use CUDA kernel for seq mode (much faster)
-
-        from rwkv.model import RWKV
-        from rwkv.utils import PIPELINE, PIPELINE_ARGS
-
-        model = RWKV(model='models/RWKV-4-Pile-169M-20220807-8023.pth', strategy='cuda fp16')
-
-        out, state = model.forward([187, 510, 1563, 310, 247], None)   # use 20B_tokenizer.json
-        print(out.detach().cpu().numpy())                   # get logits
-        out, state = model.forward([187, 510], None)
-        out, state = model.forward([1563], state)           # RNN has state (use deepcopy if you want to clone it)
-        out, state = model.forward([310, 247], state)
-        print(out.detach().cpu().numpy())                   # same result as above
-
-        pipeline = PIPELINE(model, "20B_tokenizer.json")
-
-        return pipeline, None
+        return load_RWKV_model(Path('models/RWKV-4-Pile-169M-20220807-8023.pth')), None
 
     # Custom
     else:
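
Design note on the models.py change: the is_RWKV branch of load_model() now holds only a deferred, one-line import of modules.RWKV plus the call to load_RWKV_model(), so the environment-variable setup, the debugging forward passes, and the PIPELINE construction all live in the new file, and models.py carries no RWKV-specific detail beyond the hard-coded model path.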

+ 1 - 4
modules/text_generation.py

@@ -82,17 +82,14 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
         torch.cuda.empty_cache()
 
     if shared.is_RWKV:
-        def my_print(s):
-            print(s, end='', flush=True)
         args = PIPELINE_ARGS(temperature = temperature, top_p = top_p,
                              alpha_frequency = 0.25, # Frequency Penalty (as in GPT-3)
                              alpha_presence = 0.25, # Presence Penalty (as in GPT-3)
                              token_ban = [0], # ban the generation of some tokens
                              token_stop = []) # stop generation whenever you see any token here
         reply = question + shared.model.generate(question, token_count=max_new_tokens, args=args, callback=None)
-        print(formatted_outputs(reply, None))
         yield formatted_outputs(reply, None)
-    return formatted_outputs(reply, None)
+        return formatted_outputs(reply, None)
 
     original_question = question
     if not (shared.args.chat or shared.args.cai_chat):