
Add --verbose option

oobabooga committed 3 years ago
Commit 1462277923
2 files changed, 5 insertions(+), 0 deletions(-)
  1. README.md (+1, -0)
  2. server.py (+4, -0)

README.md (+1, -0)

@@ -135,6 +135,7 @@ Optionally, you can use the following command-line flags:
 | `--settings SETTINGS_FILE` | Load the default interface settings from this json file. See `settings-template.json` for an example.|
 | `--listen`   | Make the web UI reachable from your local network.|
 | `--share`   | Create a public URL. This is useful for running the web UI on Google Colab or similar. |
+| `--verbose`   | Print the prompts to the terminal. |
 
 Out of memory errors? [Check this guide](https://github.com/oobabooga/text-generation-webui/wiki/Low-VRAM-guide).
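
Since the table entry only states what the flag does, here is a minimal, self-contained sketch of the same `store_true` pattern used in the `server.py` change below (the script name `demo.py` and the printed message are hypothetical, for illustration only):

```python
import argparse

# Same pattern as the added server.py line: a boolean flag that
# defaults to False and takes no value on the command line.
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', action='store_true',
                    help='Print the prompts to the terminal.')
args = parser.parse_args()

print(f"verbose mode: {args.verbose}")
```

Running `python demo.py --verbose` prints `verbose mode: True`; omitting the flag prints `False`, so existing invocations of the web UI are unaffected by this change.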
 

server.py (+4, -0)

@@ -34,6 +34,7 @@ parser.add_argument('--no-stream', action='store_true', help='Don\'t stream the
 parser.add_argument('--settings', type=str, help='Load the default interface settings from this json file. See settings-template.json for an example.')
 parser.add_argument('--listen', action='store_true', help='Make the web UI reachable from your local network.')
 parser.add_argument('--share', action='store_true', help='Create a public URL. This is useful for running the web UI on Google Colab or similar.')
+parser.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')
 args = parser.parse_args()
 
 if (args.chat or args.cai_chat) and not args.no_stream:
@@ -164,6 +165,9 @@ def formatted_outputs(reply, model_name):
 def generate_reply(question, tokens, inference_settings, selected_model, eos_token=None, stopping_string=None):
     global model, tokenizer, model_name, loaded_preset, preset
 
+    if args.verbose:
+        print(f"\n\n{question}\n--------------------\n")
+
     if selected_model != model_name:
         model_name = selected_model
         model = tokenizer = None
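
To see the effect of the new lines in isolation, here is a hedged sketch of the gating logic; everything except the `verbose` check and the separator format is a stand-in for the real generation code:

```python
def generate_reply(question, verbose=False):
    # Mirrors the diff: echo the raw prompt with a dashed separator
    # before generation so the exact prompt can be inspected.
    if verbose:
        print(f"\n\n{question}\n--------------------\n")
    # ... model loading and text generation happen here in server.py ...
    return f"(generated reply to: {question!r})"  # placeholder return

print(generate_reply("Hello, world", verbose=True))
```

In the actual change the check reads `args.verbose` from the parsed command-line arguments rather than taking a parameter, so the flag applies globally to every call of `generate_reply`.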