settings-template.json 1.2 KB

{
    "max_new_tokens": 200,
    "max_new_tokens_min": 1,
    "max_new_tokens_max": 2000,
    "seed": -1,
    "name1": "You",
    "name2": "Assistant",
    "context": "This is a conversation with your Assistant. The Assistant is very helpful and is eager to chat with you and answer your questions.",
    "greeting": "Hello there!",
    "end_of_turn": "",
    "custom_stopping_strings": "",
    "stop_at_newline": false,
    "add_bos_token": true,
    "chat_prompt_size": 2048,
    "chat_prompt_size_min": 0,
    "chat_prompt_size_max": 2048,
    "chat_generation_attempts": 1,
    "chat_generation_attempts_min": 1,
    "chat_generation_attempts_max": 5,
    "default_extensions": [],
    "chat_default_extensions": [
        "gallery"
    ],
    "presets": {
        "default": "Default",
        ".*(alpaca|llama)": "LLaMA-Precise",
        ".*pygmalion": "NovelAI-Storywriter",
        ".*RWKV": "Naive"
    },
    "prompts": {
        "default": "QA",
        ".*(gpt4chan|gpt-4chan|4chan)": "GPT-4chan",
        ".*oasst": "Open Assistant",
        ".*alpaca": "Alpaca"
    },
    "lora_prompts": {
        "default": "QA",
        ".*(alpaca-lora-7b|alpaca-lora-13b|alpaca-lora-30b)": "Alpaca"
    }
}
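
The keys under "presets", "prompts", and "lora_prompts" are regular expressions matched against the model or LoRA name, with "default" used when nothing matches. The Python sketch below shows one plausible way such a file could be read and a preset chosen; it is a minimal illustration under assumptions, not the web UI's actual loading code. The helper names (load_settings, pick_preset) and the matching details (case-insensitive re.match, fallback to "default") are assumptions made for this example.

    import json
    import re
    from pathlib import Path

    def load_settings(path="settings-template.json"):
        # Illustrative sketch: read the JSON settings file shown above.
        return json.loads(Path(path).read_text(encoding="utf-8"))

    def pick_preset(model_name, mapping):
        # Try each regex key against the model name; the "default" entry
        # is skipped during matching and used only as the fallback.
        for pattern, value in mapping.items():
            if pattern == "default":
                continue
            if re.match(pattern.lower(), model_name.lower()):
                return value
        return mapping.get("default")

    settings = load_settings()
    print(pick_preset("llama-13b-hf", settings["presets"]))    # LLaMA-Precise
    print(pick_preset("pygmalion-6b", settings["presets"]))    # NovelAI-Storywriter
    print(pick_preset("gpt-j-6B", settings["presets"]))        # Default (no pattern matches)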