
Merge pull request #189 from oobabooga/new-streaming

New streaming method (much faster)
oobabooga 2 years ago
parent
commit
3437de686c
5 changed files with 182 additions and 133 deletions
  1. +6 -40    modules/RWKV.py
  2. +98 -0    modules/callbacks.py
  3. +0 -32    modules/stopping_criteria.py
  4. +78 -58   modules/text_generation.py
  5. +0 -3     server.py
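
In broad strokes, the change replaces the old loop in modules/text_generation.py that re-ran model.generate a few tokens at a time with per-token streaming: a stopping-criteria callback pushes each new token onto a queue, and the new Iteratorize class in modules/callbacks.py turns that callback into a generator running on a worker thread. A rough, self-contained sketch of the two approaches, using toy stand-ins (fake_generate, old_stream, new_stream are illustrative names, not the project's API):

from queue import Queue
from threading import Thread

def fake_generate(prompt, max_new_tokens, callback=None):
    # Toy stand-in for model.generate(): emits tokens one by one and, like the
    # Stream stopping criterion added in this PR, invokes a callback per token.
    out = list(prompt)
    for i in range(max_new_tokens):
        out.append(f"tok{i}")
        if callback is not None:
            callback(list(out))
    return out

def old_stream(prompt, max_new_tokens, chunk=8):
    # Old approach: call generate() repeatedly, a chunk of tokens per call,
    # yielding the partial output after each call.
    out = list(prompt)
    for _ in range(max_new_tokens // chunk + 1):
        out = fake_generate(out, chunk)
        yield out

def new_stream(prompt, max_new_tokens):
    # New approach: one generate() call on a worker thread; its per-token
    # callback is turned into an iterator via a queue (what Iteratorize does).
    q, sentinel = Queue(), object()
    def task():
        fake_generate(list(prompt), max_new_tokens, callback=q.put)
        q.put(sentinel)
    Thread(target=task).start()
    while (item := q.get()) is not sentinel:
        yield item

for partial in new_stream(["Hello"], 5):
    print(partial)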

+ 6 - 40
modules/RWKV.py

@@ -7,6 +7,7 @@ import numpy as np
 from tokenizers import Tokenizer
 
 import modules.shared as shared
+from modules.callbacks import Iteratorize
 
 np.set_printoptions(precision=4, suppress=True, linewidth=200)
 
@@ -49,11 +50,11 @@ class RWKVModel:
         return context+self.pipeline.generate(context, token_count=token_count, args=args, callback=callback)
 
     def generate_with_streaming(self, **kwargs):
-        iterable = Iteratorize(self.generate, kwargs, callback=None)
-        reply = kwargs['context']
-        for token in iterable:
-            reply += token
-            yield reply
+        with Iteratorize(self.generate, kwargs, callback=None) as generator:
+            reply = kwargs['context']
+            for token in generator:
+                reply += token
+                yield reply
 
 class RWKVTokenizer:
     def __init__(self):
@@ -73,38 +74,3 @@ class RWKVTokenizer:
 
     def decode(self, ids):
         return self.tokenizer.decode(ids)
-
-class Iteratorize:
-
-    """
-    Transforms a function that takes a callback
-    into a lazy iterator (generator).
-    """
-
-    def __init__(self, func, kwargs={}, callback=None):
-        self.mfunc=func
-        self.c_callback=callback
-        self.q = Queue(maxsize=1)
-        self.sentinel = object()
-        self.kwargs = kwargs
-
-        def _callback(val):
-            self.q.put(val)
-
-        def gentask():
-            ret = self.mfunc(callback=_callback, **self.kwargs)
-            self.q.put(self.sentinel)
-            if self.c_callback:
-                self.c_callback(ret)
-
-        Thread(target=gentask).start()
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        obj = self.q.get(True,None)
-        if obj is self.sentinel:
-            raise StopIteration
-        else:
-            return obj

+ 98 - 0
modules/callbacks.py

@@ -0,0 +1,98 @@
+import gc
+from queue import Queue
+from threading import Thread
+
+import torch
+import transformers
+
+import modules.shared as shared
+
+# Copied from https://github.com/PygmalionAI/gradio-ui/
+class _SentinelTokenStoppingCriteria(transformers.StoppingCriteria):
+
+    def __init__(self, sentinel_token_ids: torch.LongTensor,
+                 starting_idx: int):
+        transformers.StoppingCriteria.__init__(self)
+        self.sentinel_token_ids = sentinel_token_ids
+        self.starting_idx = starting_idx
+
+    def __call__(self, input_ids: torch.LongTensor,
+                 _scores: torch.FloatTensor) -> bool:
+        for sample in input_ids:
+            trimmed_sample = sample[self.starting_idx:]
+            # Can't unfold, output is still too tiny. Skip.
+            if trimmed_sample.shape[-1] < self.sentinel_token_ids.shape[-1]:
+                continue
+
+            for window in trimmed_sample.unfold(
+                    0, self.sentinel_token_ids.shape[-1], 1):
+                if torch.all(torch.eq(self.sentinel_token_ids, window)):
+                    return True
+        return False
+
+class Stream(transformers.StoppingCriteria):
+    def __init__(self, callback_func=None):
+        self.callback_func = callback_func
+
+    def __call__(self, input_ids, scores) -> bool:
+        if self.callback_func is not None:
+            self.callback_func(input_ids[0])
+        return False
+
+class Iteratorize:
+
+    """
+    Transforms a function that takes a callback
+    into a lazy iterator (generator).
+    """
+
+    def __init__(self, func, kwargs={}, callback=None):
+        self.mfunc=func
+        self.c_callback=callback
+        self.q = Queue()
+        self.sentinel = object()
+        self.kwargs = kwargs
+        self.stop_now = False
+
+        def _callback(val):
+            if self.stop_now:
+                raise ValueError
+            self.q.put(val)
+
+        def gentask():
+            try:
+                ret = self.mfunc(callback=_callback, **self.kwargs)
+            except ValueError:
+                pass
+            clear_torch_cache()
+            self.q.put(self.sentinel)
+            if self.c_callback:
+                self.c_callback(ret)
+
+        self.thread = Thread(target=gentask)
+        self.thread.start()
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        obj = self.q.get(True,None)
+        if obj is self.sentinel:
+            raise StopIteration
+        else:
+            return obj
+
+    def __del__(self):
+        clear_torch_cache()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.stop_now = True
+        clear_torch_cache()
+
+def clear_torch_cache():
+    gc.collect()
+    if not shared.args.cpu:
+        torch.cuda.empty_cache()
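
For reference, a short usage sketch of the Iteratorize helper defined above: it wraps any function that reports results through a callback= keyword argument, and it is meant to be used as a context manager so that __exit__ sets stop_now and clears the CUDA cache if the consumer stops iterating early. The produce function below is a hypothetical toy producer, and the sketch assumes it runs inside this repo so that modules.shared and torch are importable:

from modules.callbacks import Iteratorize

def produce(n, callback=None):
    # Hypothetical producer: anything that pushes results through `callback`
    # works, e.g. model.generate with the Stream criterion attached.
    for i in range(n):
        callback(i)

# Same pattern as generate_with_streaming in modules/RWKV.py: the context
# manager guarantees cleanup even if the caller breaks out of the loop.
with Iteratorize(produce, {"n": 5}, callback=None) as gen:
    for value in gen:
        print(value)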

+ 0 - 32
modules/stopping_criteria.py

@@ -1,32 +0,0 @@
-'''
-This code was copied from
-
-https://github.com/PygmalionAI/gradio-ui/
-
-'''
-
-import torch
-import transformers
-
-
-class _SentinelTokenStoppingCriteria(transformers.StoppingCriteria):
-
-    def __init__(self, sentinel_token_ids: torch.LongTensor,
-                 starting_idx: int):
-        transformers.StoppingCriteria.__init__(self)
-        self.sentinel_token_ids = sentinel_token_ids
-        self.starting_idx = starting_idx
-
-    def __call__(self, input_ids: torch.LongTensor,
-                 _scores: torch.FloatTensor) -> bool:
-        for sample in input_ids:
-            trimmed_sample = sample[self.starting_idx:]
-            # Can't unfold, output is still too tiny. Skip.
-            if trimmed_sample.shape[-1] < self.sentinel_token_ids.shape[-1]:
-                continue
-
-            for window in trimmed_sample.unfold(
-                    0, self.sentinel_token_ids.shape[-1], 1):
-                if torch.all(torch.eq(self.sentinel_token_ids, window)):
-                    return True
-        return False

+ 78 - 58
modules/text_generation.py

@@ -5,13 +5,13 @@ import time
 import numpy as np
 import torch
 import transformers
-from tqdm import tqdm
 
 import modules.shared as shared
+from modules.callbacks import (Iteratorize, Stream,
+                               _SentinelTokenStoppingCriteria)
 from modules.extensions import apply_extensions
 from modules.html_generator import generate_4chan_html, generate_basic_html
 from modules.models import local_rank
-from modules.stopping_criteria import _SentinelTokenStoppingCriteria
 
 
 def get_max_prompt_length(tokens):
@@ -92,19 +92,22 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
     # These models are not part of Hugging Face, so we handle them
     # separately and terminate the function call earlier
     if shared.is_RWKV:
-        if shared.args.no_stream:
-            reply = shared.model.generate(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k)
-            yield formatted_outputs(reply, shared.model_name)
-        else:
-            yield formatted_outputs(question, shared.model_name)
-            # RWKV has proper streaming, which is very nice.
-            # No need to generate 8 tokens at a time.
-            for reply in shared.model.generate_with_streaming(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k):
+        try:
+            if shared.args.no_stream:
+                reply = shared.model.generate(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k)
                 yield formatted_outputs(reply, shared.model_name)
-
-        t1 = time.time()
-        print(f"Output generated in {(t1-t0):.2f} seconds.")
-        return
+            else:
+                yield formatted_outputs(question, shared.model_name)
+                # RWKV has proper streaming, which is very nice.
+                # No need to generate 8 tokens at a time.
+                for reply in shared.model.generate_with_streaming(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k):
+                    yield formatted_outputs(reply, shared.model_name)
+        finally:
+            t1 = time.time()
+            output = encode(reply)[0]
+            input_ids = encode(question)
+            print(f"Output generated in {(t1-t0):.2f} seconds ({(len(output)-len(input_ids[0]))/(t1-t0):.2f} tokens/s, {len(output)-len(input_ids[0])} tokens)")
+            return
 
     original_question = question
     if not (shared.args.chat or shared.args.cai_chat):
@@ -113,23 +116,19 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
         print(f"\n\n{question}\n--------------------\n")
 
     input_ids = encode(question, max_new_tokens)
+    original_input_ids = input_ids
+    output = input_ids[0]
     cuda = "" if (shared.args.cpu or shared.args.deepspeed or shared.args.flexgen) else ".cuda()"
     n = shared.tokenizer.eos_token_id if eos_token is None else int(encode(eos_token)[0][-1])
+    stopping_criteria_list = transformers.StoppingCriteriaList()
     if stopping_string is not None:
-        # The stopping_criteria code below was copied from
-        # https://github.com/PygmalionAI/gradio-ui/blob/master/src/model.py
+        # Copied from https://github.com/PygmalionAI/gradio-ui/blob/master/src/model.py
         t = encode(stopping_string, 0, add_special_tokens=False)
-        stopping_criteria_list = transformers.StoppingCriteriaList([
-            _SentinelTokenStoppingCriteria(
-                sentinel_token_ids=t,
-                starting_idx=len(input_ids[0])
-            )
-        ])
-    else:
-        stopping_criteria_list = None
+        stopping_criteria_list.append(_SentinelTokenStoppingCriteria(sentinel_token_ids=t, starting_idx=len(input_ids[0])))
 
     if not shared.args.flexgen:
         generate_params = [
+            f"max_new_tokens=max_new_tokens",
             f"eos_token_id={n}",
             f"stopping_criteria=stopping_criteria_list",
             f"do_sample={do_sample}",
@@ -147,44 +146,23 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
         ]
     else:
         generate_params = [
+            f"max_new_tokens={max_new_tokens if shared.args.no_stream else 8}",
             f"do_sample={do_sample}",
             f"temperature={temperature}",
             f"stop={n}",
         ]
     if shared.args.deepspeed:
         generate_params.append("synced_gpus=True")
-    if shared.args.no_stream:
-        generate_params.append("max_new_tokens=max_new_tokens")
-    else:
-        generate_params.append("max_new_tokens=8")
     if shared.soft_prompt:
         inputs_embeds, filler_input_ids = generate_softprompt_input_tensors(input_ids)
         generate_params.insert(0, "inputs_embeds=inputs_embeds")
-        generate_params.insert(0, "filler_input_ids")
-    else:
-        generate_params.insert(0, "input_ids")
-
-    # Generate the entire reply at once
-    if shared.args.no_stream:
-        with torch.no_grad():
-            output = eval(f"shared.model.generate({', '.join(generate_params)}){cuda}")[0]
-        if shared.soft_prompt:
-            output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))
-
-        reply = decode(output)
-        if not (shared.args.chat or shared.args.cai_chat):
-            reply = original_question + apply_extensions(reply[len(question):], "output")
-
-        t1 = time.time()
-        print(f"Output generated in {(t1-t0):.2f} seconds ({(len(output)-len(input_ids[0]))/(t1-t0)/8:.2f} it/s, {len(output)-len(input_ids[0])} tokens)")
-        yield formatted_outputs(reply, shared.model_name)
-
-    # Generate the reply 8 tokens at a time
+        generate_params.insert(0, "inputs=filler_input_ids")
     else:
-        yield formatted_outputs(original_question, shared.model_name)
-        for i in tqdm(range(max_new_tokens//8+1)):
-            clear_torch_cache()
+        generate_params.insert(0, "inputs=input_ids")
 
+    try:
+        # Generate the entire reply at once.
+        if shared.args.no_stream:
             with torch.no_grad():
                 output = eval(f"shared.model.generate({', '.join(generate_params)}){cuda}")[0]
             if shared.soft_prompt:
@@ -193,16 +171,58 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
             reply = decode(output)
             if not (shared.args.chat or shared.args.cai_chat):
                 reply = original_question + apply_extensions(reply[len(question):], "output")
+
             yield formatted_outputs(reply, shared.model_name)
 
-            if not shared.args.flexgen:
-                if output[-1] == n:
-                    break
-                input_ids = torch.reshape(output, (1, output.shape[0]))
-            else:
+        # Stream the reply 1 token at a time.
+        # This is based on the trick of using 'stopping_criteria' to create an iterator.
+        elif not shared.args.flexgen:
+
+            def generate_with_callback(callback=None, **kwargs):
+                kwargs['stopping_criteria'].append(Stream(callback_func=callback))
+                clear_torch_cache()
+                with torch.no_grad():
+                    shared.model.generate(**kwargs)
+
+            def generate_with_streaming(**kwargs):
+                return Iteratorize(generate_with_callback, kwargs, callback=None)
+
+            yield formatted_outputs(original_question, shared.model_name)
+            with eval(f"generate_with_streaming({', '.join(generate_params)})") as generator:
+                for output in generator:
+                    if shared.soft_prompt:
+                        output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))
+                    reply = decode(output)
+
+                    if not (shared.args.chat or shared.args.cai_chat):
+                        reply = original_question + apply_extensions(reply[len(question):], "output")
+                    yield formatted_outputs(reply, shared.model_name)
+
+                    if output[-1] == n:
+                        break
+
+        # Stream the output naively for FlexGen since it doesn't support 'stopping_criteria'
+        else:
+            for i in range(max_new_tokens//8+1):
+                clear_torch_cache()
+                with torch.no_grad():
+                    output = eval(f"shared.model.generate({', '.join(generate_params)})")[0]
+                if shared.soft_prompt:
+                    output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))
+                reply = decode(output)
+
+                if not (shared.args.chat or shared.args.cai_chat):
+                    reply = original_question + apply_extensions(reply[len(question):], "output")
+                yield formatted_outputs(reply, shared.model_name)
+
                 if np.count_nonzero(input_ids[0] == n) < np.count_nonzero(output == n):
                     break
+
                 input_ids = np.reshape(output, (1, output.shape[0]))
+                if shared.soft_prompt:
+                    inputs_embeds, filler_input_ids = generate_softprompt_input_tensors(input_ids)
 
-            if shared.soft_prompt:
-                inputs_embeds, filler_input_ids = generate_softprompt_input_tensors(input_ids)
+    finally:
+        t1 = time.time()
+        print(f"Output generated in {(t1-t0):.2f} seconds ({(len(output)-len(original_input_ids[0]))/(t1-t0):.2f} tokens/s, {len(output)-len(original_input_ids[0])} tokens)")
+        return
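
The "stopping_criteria trick" mentioned in the streaming branch above works because transformers evaluates every StoppingCriteria after each generated token, so a criterion that always returns False can be used purely for its side effect, which is exactly what the new Stream class does. A stripped-down sketch of the same idea against the public transformers API (the "gpt2" checkpoint is only a placeholder):

import torch
import transformers

class Stream(transformers.StoppingCriteria):
    # Never stops generation; only reports the tokens produced so far.
    def __init__(self, callback):
        self.callback = callback

    def __call__(self, input_ids, scores, **kwargs) -> bool:
        self.callback(input_ids[0])
        return False  # we only want the per-token side effect

tokenizer = transformers.AutoTokenizer.from_pretrained("gpt2")      # placeholder
model = transformers.AutoModelForCausalLM.from_pretrained("gpt2")   # placeholder

def on_token(ids):
    # Called once per generated token with the full sequence so far.
    print(tokenizer.decode(ids), end="\r")

input_ids = tokenizer("Hello", return_tensors="pt").input_ids
with torch.no_grad():
    model.generate(input_ids,
                   max_new_tokens=20,
                   stopping_criteria=transformers.StoppingCriteriaList([Stream(on_token)]))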

+ 0 - 3
server.py

@@ -18,9 +18,6 @@ from modules.html_generator import generate_chat_html
 from modules.models import load_model, load_soft_prompt
 from modules.text_generation import generate_reply
 
-if (shared.args.chat or shared.args.cai_chat) and not shared.args.no_stream:
-    print('Warning: chat mode currently becomes somewhat slower with text streaming on.\nConsider starting the web UI with the --no-stream option.\n')
-    
 # Loading custom settings
 settings_file = None
 if shared.args.settings is not None and Path(shared.args.settings).exists():