Add .config/mods/mods.yml
parent b9fa64bb82
commit a38970d672
1 changed file with 78 additions and 0 deletions
78	dot_config/private_mods/mods.yml	Normal file
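(In chezmoi's source-state naming, `dot_config/private_mods/` is applied as `~/.config/mods/`; the `private_` prefix removes group and world permissions from the target directory.)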
@@ -0,0 +1,78 @@
# Default model (gpt-3.5-turbo, gpt-4, ggml-gpt4all-j...).
default-model: llamacorn-1.1b
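# To override per invocation with mods' -m/--model flag, e.g.
# (ds-coder is an alias defined under apis below):
#   mods -m ds-coder 'add error handling' < main.go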
# Text to append when using the -f flag.
format-text:
  markdown: "Format the response as markdown without enclosing backticks."
  json: "Format the response as json without enclosing backticks."
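# With -f set, mods appends the matching string above to the prompt, e.g.:
#   mods -f 'summarize the input' < notes.txt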
# List of predefined system messages that can be used as roles.
roles:
  "default": []
  # Example, a role called `shell`:
  shell:
    - you are a shell expert
    - you do not explain anything
    - you simply output one liners to solve the problems you're asked
    - you do not provide any explanation whatsoever, ONLY the command
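# Example invocation (assuming mods' --role flag):
#   mods --role shell 'find files larger than 1GB'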
# Ask for the response to be formatted as markdown unless otherwise set.
format: false
# System role to use.
role: "default"
# Render output as raw text when connected to a TTY.
raw: false
# Quiet mode (hide the spinner while loading and stderr messages for success).
quiet: false
# Temperature (randomness) of results, from 0.0 to 2.0.
temp: 1.0
# TopP, an alternative to temperature that narrows response, from 0.0 to 1.0.
topp: 1.0
# TopK, only sample from the top K options for each subsequent token.
topk: 50
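# Note: temperature and top-p are alternative sampling controls; it is
# usually best to tune one and leave the other at its default.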
# Turn off the client-side limit on the size of the input into the model.
no-limit: false
# Wrap formatted output at a specific width (default is 80).
word-wrap: 80
# Include the prompt from the arguments in the response.
include-prompt-args: false
# Include the prompt from the arguments and stdin, truncating stdin to the specified number of lines.
include-prompt: 0
# Maximum number of times to retry API calls.
max-retries: 5
# Your desired level of fanciness.
fanciness: 10
# Text to show while generating.
status-text: Generating
# Theme to use in the forms. Valid options are: 'charm', 'catppuccin', 'dracula', and 'base16'.
theme: base16
# Default character limit on input to model.
max-input-chars: 12250
# Maximum number of tokens in response.
# max-tokens: 100
# Aliases and endpoints for OpenAI compatible REST API.
apis:
  # localai:
  #   # LocalAI setup instructions: https://github.com/go-skynet/LocalAI#example-use-gpt4all-j-model
  #   base-url: http://localhost:8080
  #   models:
  #     ggml-gpt4all-j:
  #       aliases: ["local", "4all"]
  #       max-input-chars: 12250
  #       fallback:
  localai:
    base-url: http://127.0.0.1:1337
    models:
      "deepseek-coder-1.3b":
        aliases: ["ds-coder"]
        max-input-chars: 16484
        fallback: llamacorn-1.1b
      "llamacorn-1.1b":
        aliases: ["local", "llcorn"]
        max-input-chars: 2048
        fallback: "tinyllama-1.1b"
      "llava-phi-3-mini-int4.gguf":
        aliases: ["llava"]
        max-input-chars: 2048
        fallback: llamacorn-1.1b
      "tinyllama-1.1b":
        aliases: ["llama"]
        max-input-chars: 2048
        fallback:
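# Aliases can stand in for full model names (e.g. `mods -m local` resolves
# to llamacorn-1.1b), and the fallback entries appear to form a chain:
# deepseek-coder-1.3b -> llamacorn-1.1b -> tinyllama-1.1b -> (none).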