Merge branch 'main' into Zerogoki00-opt4-bit

oobabooga 2 years ago
parent
commit
3da73e409f

+ 1 - 0
.github/FUNDING.yml

@@ -0,0 +1 @@
+ko_fi: oobabooga

+ 53 - 0
.github/ISSUE_TEMPLATE/bug_report_template.yml

@@ -0,0 +1,53 @@
+name: "Bug report"
+description: Report a bug
+labels: [ "bug" ]
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Thanks for taking the time to fill out this bug report!
+  - type: textarea
+    id: bug-description
+    attributes:
+      label: Describe the bug
+      description: A clear and concise description of what the bug is.
+      placeholder: Bug description
+    validations:
+      required: true
+  - type: checkboxes
+    attributes:
+      label: Is there an existing issue for this?
+      description: Please search to see if an issue already exists for the bug you encountered.
+      options:
+        - label: I have searched the existing issues
+          required: true
+  - type: textarea
+    id: reproduction
+    attributes:
+      label: Reproduction
+      description: Please provide the steps necessary to reproduce your issue.
+      placeholder: Reproduction
+    validations:
+      required: true
+  - type: textarea
+    id: screenshot
+    attributes:
+      label: Screenshot
+      description: "If possible, please include screenshot(s) so that we can understand what the issue is."
+  - type: textarea
+    id: logs
+    attributes:
+      label: Logs
+      description: "Please include the full stacktrace of the errors you get in the command-line (if any)."
+      render: shell
+    validations:
+      required: true
+  - type: textarea
+    id: system-info
+    attributes:
+      label: System Info
+      description: "Please share your system info with us: operating system, GPU brand, and GPU model. If you are using a Google Colab notebook, mention that instead."
+      render: shell
+      placeholder: 
+    validations:
+      required: true

+ 16 - 0
.github/ISSUE_TEMPLATE/feature_request.md

@@ -0,0 +1,16 @@
+---
+name: Feature request
+about: Suggest an improvement or new feature for the web UI
+title: ''
+labels: 'enhancement'
+assignees: ''
+
+---
+
+**Description**
+
+A clear and concise description of what you want to be implemented.
+
+**Additional Context**
+
+If applicable, please provide any extra information, external links, or screenshots that could be useful.

+ 11 - 0
.github/dependabot.yml

@@ -0,0 +1,11 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
+
+version: 2
+updates:
+  - package-ecosystem: "pip" # See documentation for possible values
+    directory: "/" # Location of package manifests
+    schedule:
+      interval: "weekly"

+ 22 - 0
.github/workflows/stale.yml

@@ -0,0 +1,22 @@
+name: Close inactive issues
+on:
+  schedule:
+    - cron: "10 23 * * *"
+
+jobs:
+  close-issues:
+    runs-on: ubuntu-latest
+    permissions:
+      issues: write
+      pull-requests: write
+    steps:
+      - uses: actions/stale@v5
+        with:
+          stale-issue-message: ""
+          close-issue-message: "This issue has been closed due to inactivity for 30 days. If you believe it is still relevant, you can reopen it (if you are the author) or leave a comment below."
+          days-before-issue-stale: 30
+          days-before-issue-close: 0
+          stale-issue-label: "stale"
+          days-before-pr-stale: -1
+          days-before-pr-close: -1
+          repo-token: ${{ secrets.GITHUB_TOKEN }}

+ 36 - 35
README.md

@@ -60,8 +60,7 @@ pip3 install torch torchvision torchaudio --extra-index-url https://download.pyt
 conda install pytorch torchvision torchaudio git -c pytorch
 ```
 
-See also: [Installation instructions for human beings
-](https://github.com/oobabooga/text-generation-webui/wiki/Installation-instructions-for-human-beings)
+See also: [Installation instructions for human beings](https://github.com/oobabooga/text-generation-webui/wiki/Installation-instructions-for-human-beings).
 
 ## Installation option 2: one-click installers
 
@@ -132,39 +131,41 @@ Then browse to
 
 Optionally, you can use the following command-line flags:
 
-| Flag                                       | Description                                                                                                                                                                                                                                                                                   |
-|--------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `-h`, `--help`                             | show this help message and exit                                                                                                                                                                                                                                                               |
-| `--model MODEL`                            | Name of the model to load by default.                                                                                                                                                                                                                                                         |
-| `--notebook`                               | Launch the web UI in notebook mode, where the output is written to the same text box as the input.                                                                                                                                                                                            |
-| `--chat`                                   | Launch the web UI in chat mode.                                                                                                                                                                                                                                                               |
-| `--cai-chat`                               | Launch the web UI in chat mode with a style similar to Character.AI's. If the file `img_bot.png` or `img_bot.jpg` exists in the same folder as server.py, this image will be used as the bot's profile picture. Similarly, `img_me.png` or `img_me.jpg` will be used as your profile picture. |
-| `--cpu`                                    | Use the CPU to generate text.                                                                                                                                                                                                                                                                 |
-| `--load-in-8bit`                           | Load the model with 8-bit precision.                                                                                                                                                                                                                                                          |
-| `--gptq-bits GPTQ_BITS`                    | Load a pre-quantized model with specified precision. 2, 3, 4 and 8 (bit) are supported. Currently only works with LLaMA and OPT.                                                                                                                                                              |
-| `--gptq-model-type MODEL_TYPE`             | Model type of pre-quantized model. Currently only LLaMa and OPT are supported.                                                                                                                                                                                                                |
-| `--bf16`                                   | Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.                                                                                                                                                                                                                           |
-| `--auto-devices`                           | Automatically split the model across the available GPU(s) and CPU.                                                                                                                                                                                                                            |
-| `--disk`                                   | If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.                                                                                                                                                                                            |
-| `--disk-cache-dir DISK_CACHE_DIR`          | Directory to save the disk cache to. Defaults to `cache/`.                                                                                                                                                                                                                                    |
-| `--gpu-memory GPU_MEMORY [GPU_MEMORY ...]` | Maxmimum GPU memory in GiB to be allocated per GPU. Example: `--gpu-memory 10` for a single GPU, `--gpu-memory 10 5` for two GPUs.                                                                                                                                                            |
-| `--cpu-memory CPU_MEMORY`                  | Maximum CPU memory in GiB to allocate for offloaded weights. Must be an integer number. Defaults to 99.                                                                                                                                                                                       |
-| `--flexgen`                                | Enable the use of FlexGen offloading.                                                                                                                                                                                                                                                         |
-| `--percent PERCENT [PERCENT ...]`          | FlexGen: allocation percentages. Must be 6 numbers separated by spaces (default: 0, 100, 100, 0, 100, 0).                                                                                                                                                                                     |
-| `--compress-weight`                        | FlexGen: Whether to compress weight (default: False).                                                                                                                                                                                                                                         |
-| `--pin-weight [PIN_WEIGHT]`                | FlexGen: whether to pin weights (setting this to False reduces CPU memory by 20%).                                                                                                                                                                                                            |
-| `--deepspeed`                              | Enable the use of DeepSpeed ZeRO-3 for inference via the Transformers integration.                                                                                                                                                                                                            |
-| `--nvme-offload-dir NVME_OFFLOAD_DIR`      | DeepSpeed: Directory to use for ZeRO-3 NVME offloading.                                                                                                                                                                                                                                       |
-| `--local_rank LOCAL_RANK`                  | DeepSpeed: Optional argument for distributed setups.                                                                                                                                                                                                                                          |
-| `--rwkv-strategy RWKV_STRATEGY`            | RWKV: The strategy to use while loading the model. Examples: "cpu fp32", "cuda fp16", "cuda fp16i8".                                                                                                                                                                                          |
-| `--rwkv-cuda-on`                           | RWKV: Compile the CUDA kernel for better performance.                                                                                                                                                                                                                                         |
-| `--no-stream`                              | Don't stream the text output in real time. This improves the text generation performance.                                                                                                                                                                                                     |
-| `--settings SETTINGS_FILE`                 | Load the default interface settings from this json file. See `settings-template.json` for an example. If you create a file called `settings.json`, this file will be loaded by default without the need to use the `--settings` flag.                                                         |
-| `--extensions EXTENSIONS [EXTENSIONS ...]` | The list of extensions to load. If you want to load more than one extension, write the names separated by spaces.                                                                                                                                                                             |
-| `--listen`                                 | Make the web UI reachable from your local network.                                                                                                                                                                                                                                            |
-| `--listen-port LISTEN_PORT`                | The listening port that the server will use.                                                                                                                                                                                                                                                  |
-| `--share`                                  | Create a public URL. This is useful for running the web UI on Google Colab or similar.                                                                                                                                                                                                        |
-| `--verbose`                                | Print the prompts to the terminal.                                                                                                                                                                                                                                                            |
+| Flag | Description |
+|------|-------------|
+| `-h`, `--help` | show this help message and exit |
+| `--model MODEL` | Name of the model to load by default. |
+| `--notebook` | Launch the web UI in notebook mode, where the output is written to the same text box as the input. |
+| `--chat` | Launch the web UI in chat mode. |
+| `--cai-chat` | Launch the web UI in chat mode with a style similar to Character.AI's. If the file `img_bot.png` or `img_bot.jpg` exists in the same folder as server.py, this image will be used as the bot's profile picture. Similarly, `img_me.png` or `img_me.jpg` will be used as your profile picture. |
+| `--cpu` | Use the CPU to generate text. |
+| `--load-in-8bit` | Load the model with 8-bit precision. |
+| `--load-in-4bit` | Load the model with 4-bit precision. Currently only works with LLaMA. |
+| `--gptq-bits GPTQ_BITS` | Load a pre-quantized model with the specified precision. 2, 3, 4 and 8 (bit) are supported. Currently only works with LLaMA and OPT. |
+| `--gptq-model-type MODEL_TYPE` | Model type of pre-quantized model. Currently only LLaMA and OPT are supported. |
+| `--bf16` | Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU. |
+| `--auto-devices` | Automatically split the model across the available GPU(s) and CPU. |
+| `--disk` | If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk. |
+| `--disk-cache-dir DISK_CACHE_DIR` | Directory to save the disk cache to. Defaults to `cache/`. |
+| `--gpu-memory GPU_MEMORY [GPU_MEMORY ...]` | Maximum GPU memory in GiB to be allocated per GPU. Example: `--gpu-memory 10` for a single GPU, `--gpu-memory 10 5` for two GPUs. |
+| `--cpu-memory CPU_MEMORY` | Maximum CPU memory in GiB to allocate for offloaded weights. Must be an integer. Defaults to 99. |
+| `--flexgen` | Enable the use of FlexGen offloading. |
+| `--percent PERCENT [PERCENT ...]` | FlexGen: allocation percentages. Must be 6 numbers separated by spaces (default: 0, 100, 100, 0, 100, 0). |
+| `--compress-weight` | FlexGen: whether to compress weights (default: False). |
+| `--pin-weight [PIN_WEIGHT]` | FlexGen: whether to pin weights (setting this to False reduces CPU memory by 20%). |
+| `--deepspeed` | Enable the use of DeepSpeed ZeRO-3 for inference via the Transformers integration. |
+| `--nvme-offload-dir NVME_OFFLOAD_DIR` | DeepSpeed: Directory to use for ZeRO-3 NVME offloading. |
+| `--local_rank LOCAL_RANK` | DeepSpeed: Optional argument for distributed setups. |
+| `--rwkv-strategy RWKV_STRATEGY` | RWKV: The strategy to use while loading the model. Examples: "cpu fp32", "cuda fp16", "cuda fp16i8". |
+| `--rwkv-cuda-on` | RWKV: Compile the CUDA kernel for better performance. |
+| `--no-stream` | Don't stream the text output in real time. |
+| `--settings SETTINGS_FILE` | Load the default interface settings from this json file. See `settings-template.json` for an example. If you create a file called `settings.json`, this file will be loaded by default without the need to use the `--settings` flag. |
+| `--extensions EXTENSIONS [EXTENSIONS ...]` | The list of extensions to load. If you want to load more than one extension, write the names separated by spaces. |
+| `--listen` | Make the web UI reachable from your local network. |
+| `--listen-port LISTEN_PORT` | The listening port that the server will use. |
+| `--share` | Create a public URL. This is useful for running the web UI on Google Colab or similar. |
+| `--auto-launch` | Open the web UI in the default browser upon launch. |
+| `--verbose` | Print the prompts to the terminal. |
 
 Out of memory errors? [Check this guide](https://github.com/oobabooga/text-generation-webui/wiki/Low-VRAM-guide).
 

+ 6 - 4
extensions/silero_tts/script.py

@@ -57,7 +57,7 @@ def remove_surrounded_chars(string):
 
 def remove_tts_from_history(name1, name2):
     for i, entry in enumerate(shared.history['internal']):
-        shared.history['visible'][i][1] = entry[1]
+        shared.history['visible'][i] = [shared.history['visible'][i][0], entry[1]]
     return chat.generate_chat_output(shared.history['visible'], name1, name2, shared.character)
 
 def toggle_text_in_history(name1, name2):
@@ -66,9 +66,9 @@ def toggle_text_in_history(name1, name2):
         if visible_reply.startswith('<audio'):
             if params['show_text']:
                 reply = shared.history['internal'][i][1]
-                shared.history['visible'][i][1] = f"{visible_reply.split('</audio>')[0]}</audio>\n\n{reply}"
+                shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>\n\n{reply}"]
             else:
-                shared.history['visible'][i][1] = f"{visible_reply.split('</audio>')[0]}</audio>"
+                shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>"]
     return chat.generate_chat_output(shared.history['visible'], name1, name2, shared.character)
 
 def input_modifier(string):
@@ -79,8 +79,9 @@ def input_modifier(string):
 
     # Remove autoplay from the last reply
     if (shared.args.chat or shared.args.cai_chat) and len(shared.history['internal']) > 0:
-        shared.history['visible'][-1][1] = shared.history['visible'][-1][1].replace('controls autoplay>','controls>')
+        shared.history['visible'][-1] = [shared.history['visible'][-1][0], shared.history['visible'][-1][1].replace('controls autoplay>','controls>')]
 
+    shared.processing_message = "*Is recording a voice message...*"
     return string
 
 def output_modifier(string):
@@ -119,6 +120,7 @@ def output_modifier(string):
         if params['show_text']:
             string += f'\n\n{original_string}'
 
+    shared.processing_message = "*Is typing...*"
     return string
 
 def bot_prefix_modifier(string):
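
Every change in this file follows the same pattern: an in-place mutation of a chat-history pair (`pair[1] = ...`) is replaced by reassigning the whole `[user, bot]` pair, so each edited row becomes a fresh list object. A minimal sketch of the pattern, using illustrative names that are not part of the repository:

```python
# `history` mirrors the shape of shared.history['visible']:
# a list of [user_message, bot_reply] pairs.
def replace_bot_reply(history, i, new_reply):
    # Rebuild the whole pair instead of mutating pair[1] in place
    history[i] = [history[i][0], new_reply]

history = [['Hello', '<audio controls autoplay>a.wav</audio>']]
replace_bot_reply(history, 0, '<audio controls>a.wav</audio>')
print(history[0])  # ['Hello', '<audio controls>a.wav</audio>']
```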

+ 6 - 4
modules/chat.py

@@ -126,8 +126,9 @@ def chatbot_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical
     else:
         prompt = custom_generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size)
 
+    # Yield *Is typing...*
     if not regenerate:
-        yield shared.history['visible']+[[visible_text, '*Is typing...*']]
+        yield shared.history['visible']+[[visible_text, shared.processing_message]]
 
     # Generate
     reply = ''
@@ -168,7 +169,8 @@ def impersonate_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typ
     prompt = generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size, impersonate=True)
 
     reply = ''
-    yield '*Is typing...*'
+    # Yield *Is typing...*
+    yield shared.processing_message
     for i in range(chat_generation_attempts):
         for reply in generate_reply(prompt+reply, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, eos_token=eos_token, stopping_string=f"\n{name2}:"):
             reply, next_character_found = extract_message_from_reply(prompt, reply, name1, name2, check, impersonate=True)
@@ -187,8 +189,8 @@ def regenerate_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typi
     else:
         last_visible = shared.history['visible'].pop()
         last_internal = shared.history['internal'].pop()
-
-        yield generate_chat_output(shared.history['visible']+[[last_visible[0], '*Is typing...*']], name1, name2, shared.character)
+        # Yield '*Is typing...*'
+        yield generate_chat_output(shared.history['visible']+[[last_visible[0], shared.processing_message]], name1, name2, shared.character)
         for _history in chatbot_wrapper(last_internal[0], max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, name1, name2, context, check, chat_prompt_size, chat_generation_attempts, regenerate=True):
             if shared.args.cai_chat:
                 shared.history['visible'][-1] = [last_visible[0], _history[-1][1]]
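
All three wrappers above now yield `shared.processing_message` instead of the hard-coded `'*Is typing...*'`, so extensions can change the status text shown while a reply is being generated. A simplified sketch of the yield-placeholder-then-stream pattern (illustrative names, not the exact signatures from modules/chat.py):

```python
PROCESSING_MESSAGE = '*Is typing...*'  # default defined in modules/shared.py

def chat_stream(history, user_text, generate_reply):
    # Yield a placeholder row first so the UI shows a status immediately,
    # then overwrite it with progressively longer partial replies.
    yield history + [[user_text, PROCESSING_MESSAGE]]
    for partial in generate_reply(user_text):
        yield history + [[user_text, partial]]

# Each yielded value is a complete chat history the UI can render.
for state in chat_stream([], 'Hi', lambda text: ['He', 'Hello!']):
    print(state[-1][1])  # *Is typing...*  ->  He  ->  Hello!
```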

+ 3 - 2
modules/shared.py

@@ -11,6 +11,7 @@ is_RWKV = False
 history = {'internal': [], 'visible': []}
 character = 'None'
 stop_everything = False
+processing_message = '*Is typing...*'
 
 # UI elements (buttons, sliders, HTML, etc)
 gradio = {}
@@ -85,12 +86,12 @@ parser.add_argument('--nvme-offload-dir', type=str, help='DeepSpeed: Directory t
 parser.add_argument('--local_rank', type=int, default=0, help='DeepSpeed: Optional argument for distributed setups.')
 parser.add_argument('--rwkv-strategy', type=str, default=None, help='RWKV: The strategy to use while loading the model. Examples: "cpu fp32", "cuda fp16", "cuda fp16i8".')
 parser.add_argument('--rwkv-cuda-on', action='store_true', help='RWKV: Compile the CUDA kernel for better performance.')
-parser.add_argument('--no-stream', action='store_true', help='Don\'t stream the text output in real time. This improves the text generation performance.')
+parser.add_argument('--no-stream', action='store_true', help='Don\'t stream the text output in real time.')
 parser.add_argument('--settings', type=str, help='Load the default interface settings from this json file. See settings-template.json for an example. If you create a file called settings.json, this file will be loaded by default without the need to use the --settings flag.')
 parser.add_argument('--extensions', type=str, nargs="+", help='The list of extensions to load. If you want to load more than one extension, write the names separated by spaces.')
 parser.add_argument('--listen', action='store_true', help='Make the web UI reachable from your local network.')
 parser.add_argument('--listen-port', type=int, help='The listening port that the server will use.')
 parser.add_argument('--share', action='store_true', help='Create a public URL. This is useful for running the web UI on Google Colab or similar.')
+parser.add_argument('--auto-launch', action='store_true', default=False, help='Open the web UI in the default browser upon launch.')
 parser.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')
-parser.add_argument('--auto-launch', action='store_true', default=False, help='Open the web UI in the default browser upon launch')
 args = parser.parse_args()
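
The new `processing_message` global is the hook that the silero_tts and chat.py changes above rely on: an extension's `input_modifier` can overwrite it before generation starts. A hedged sketch of a hypothetical extension using the hook (not part of this commit):

```python
import modules.shared as shared

def input_modifier(string):
    # Displayed instead of the default '*Is typing...*' while generating
    shared.processing_message = "*Consulting the archives...*"
    return string

def output_modifier(string):
    # Restore the default once the reply is finished
    shared.processing_message = "*Is typing...*"
    return string
```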

+ 1 - 1
modules/text_generation.py

@@ -123,7 +123,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
     original_input_ids = input_ids
     output = input_ids[0]
     cuda = "" if (shared.args.cpu or shared.args.deepspeed or shared.args.flexgen) else ".cuda()"
-    eos_token_ids = [shared.tokenizer.eos_token_id]
+    eos_token_ids = [shared.tokenizer.eos_token_id] if shared.tokenizer.eos_token_id is not None else []
     if eos_token is not None:
         eos_token_ids.append(int(encode(eos_token)[0][-1]))
     stopping_criteria_list = transformers.StoppingCriteriaList()
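
The one-line change guards against tokenizers that define no EOS token: previously `eos_token_ids` would start as `[None]`, and `None` is not a valid token id for the stopping logic. A minimal sketch of the guard in isolation (hypothetical helper, assuming a Hugging Face-style `tokenizer.eos_token_id` attribute):

```python
def build_eos_token_ids(tokenizer, extra_token_id=None):
    # Start empty when the tokenizer has no EOS token, instead of [None]
    eos_token_ids = [tokenizer.eos_token_id] if tokenizer.eos_token_id is not None else []
    if extra_token_id is not None:
        eos_token_ids.append(extra_token_id)
    return eos_token_ids
```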

+ 4 - 4
requirements.txt

@@ -1,11 +1,11 @@
-accelerate==0.17.0
-bitsandbytes==0.37.0
+accelerate==0.17.1
+bitsandbytes==0.37.1
 flexgen==0.1.7
 gradio==3.18.0
 numpy
 requests
-rwkv==0.3.1
+rwkv==0.4.2
 safetensors==0.3.0
 sentencepiece
 tqdm
-git+https://github.com/zphang/transformers@llama_push
+git+https://github.com/zphang/transformers.git@68d640f7c368bcaaaecfc678f11908ebbd3d6176