Combine default generation parameters and settings into one file.
Showing 5 changed files with 124 additions and 121 deletions.
@@ -1,92 +1,109 @@

Removed: the old standalone parameters file, a flat JSON object holding only the default generation parameters.

{
  "num_predict": {
    "value": -1,
    "description": "Maximum number of tokens to predict when generating text.",
    "range": "-1 = infinity, -2 = until context filled"
  },
  "temperature": {
    "value": 0.8,
    "description": "Increasing the temperature will make the model answer more creatively.",
    "range": "0.0-2.0"
  },
  "repeat_penalty": {
    "value": 1.1,
    "description": "Higher values penalize repetition more strongly.",
    "range": "0.0-2.0"
  },
  "repeat_last_n": {
    "value": 64,
    "description": "Sets how far back the model looks to prevent repetition.",
    "range": "0 = disabled, -1 = num_ctx"
  },
  "top_k": {
    "value": 40,
    "description": "Reduces the probability of generating nonsense.",
    "range": "-1 to 100"
  },
  "top_p": {
    "value": 0.9,
    "description": "Works together with top-k. A higher value will lead to more diverse text, while a lower value will generate more focused and conservative text.",
    "range": "0.0-1.0"
  },
  "tfs_z": {
    "value": 1.0,
    "description": "Reduces the impact of less probable tokens on the output. A higher value reduces the impact more.",
    "range": "0.0-1.0"
  },
  "typical_p": {
    "value": 1.0,
    "description": "Reduces the impact of less probable tokens on the output.",
    "range": "0.0-1.0"
  },
  "presence_penalty": {
    "value": 0.0,
    "description": "Penalizes new tokens based on their presence in the text so far.",
    "range": "0.0-1.0"
  },
  "frequency_penalty": {
    "value": 0.0,
    "description": "Penalizes new tokens based on their frequency in the text so far.",
    "range": "0.0-1.0"
  },
  "mirostat": {
    "value": 0,
    "description": "Enables or disables Mirostat sampling.",
    "range": "0 = disabled, 1 = Mirostat v1, 2 = Mirostat v2"
  },
  "mirostat_tau": {
    "value": 5.0,
    "description": "Controls the balance between coherence and diversity of the output. A lower value results in more focused and coherent text.",
    "range": "0.0-10.0"
  },
  "mirostat_eta": {
    "value": 0.1,
    "description": "Influences how quickly the algorithm responds to feedback from the generated text.",
    "range": "0.0-1.0"
  },
  "num_keep": {
    "value": 0,
    "description": "Number of tokens to keep unchanged at the beginning of the generated text.",
    "range": "Integer value"
  },
  "penalize_newline": {
    "value": true,
    "description": "Whether to penalize the generation of newlines.",
    "range": "Boolean value"
  },
  "stop": {
    "value": [],
    "description": "When one of these patterns is encountered, the LLM stops generating text and returns.",
    "range": "Array of strings"
  },
  "seed": {
    "value": -1,
    "description": "Sets the random-number seed used for generation. Setting this to a specific number makes the model generate the same text for the same prompt.",
    "range": "Integer value"
  },
  "num_ctx": {
    "value": 4096,
    "description": "Sets the size of the context window used to generate the next token.",
    "range": "The maximum context length depends on the model."
  }
}
"host": "http://localhost:11434", | ||
"llm_name": "Ollama", | ||
"model_name": "", | ||
"openai_api_key": "", | ||
"gemini_api_key": "", | ||
"system": "", | ||
"speakResponse": false, | ||
"voice": "unknown", | ||
"rate": 0.0, | ||
"chunk_size":1024, | ||
"chunk_overlap":20, | ||
"similarity_top_k":2, | ||
"similarity_cutoff":0.0, | ||
"response_mode": "compact", | ||
"show_context": false, | ||
"parameters": { | ||
"num_ctx": { | ||
"value": 4096, | ||
"description": "Sets the size of the context window used to generate the next token.", | ||
"range": "The max context length depends on the model." | ||
}, | ||
"num_predict": { | ||
"value": -1, | ||
"description": "Maximum number of tokens to predict when generating text.", | ||
"range": "-1 = infinity, -2 = until context filled" | ||
}, | ||
"temperature": { | ||
"value": 0.8, | ||
"description": "Increasing the temperature will make the model answer more creatively.", | ||
"range": "0.0-2.0" | ||
}, | ||
"repeat_penalty": { | ||
"value": 1.1, | ||
"description": "Higher value will penalize repetitions more strongly.", | ||
"range": "0.0-2.0" | ||
}, | ||
"repeat_last_n": { | ||
"value": 64, | ||
"description": "Sets how far back for the model to look back to prevent repetition.", | ||
"range": "0=disabled, -1=num_ctx" | ||
}, | ||
"top_k": { | ||
"value": 40, | ||
"description": "Reduces the probability of generating nonsense.", | ||
"range": "-1-100" | ||
}, | ||
"top_p": { | ||
"value": 0.9, | ||
"description": "Works together with top-k. A higher value will lead to more diverse text, while a lower value will generate more focused and conservative text.", | ||
"range": "0.0-1.0" | ||
}, | ||
"tfs_z": { | ||
"value": 1.0, | ||
"description": "Reduces the impact of less probable tokens from the output. A higher value will reduce the impact more.", | ||
"range": "0.0-1.0" | ||
}, | ||
"typical_p": { | ||
"value": 1.0, | ||
"description": "Reduces the impact of less probable tokens from the output.", | ||
"range": "0.0-1.0" | ||
}, | ||
"presence_penalty": { | ||
"value": 0.0, | ||
"description": "Penalizes new tokens based on their presence in the text so far.", | ||
"range": "0.0-1.0" | ||
}, | ||
"frequency_penalty": { | ||
"value": 0.0, | ||
"description": "Penalizes new tokens based on their frequency in the text so far.", | ||
"range": "0.0-1.0" | ||
}, | ||
"mirostat": { | ||
"value": 0, | ||
"description": "Enables or disables mirostat.", | ||
"range": "0=disable, 1=v1, 2=v2" | ||
}, | ||
"mirostat_tau": { | ||
"value": 5.0, | ||
"description": "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.", | ||
"range": "0.0-10.0" | ||
}, | ||
"mirostat_eta": { | ||
"value": 0.1, | ||
"description": "Influences how quickly the algorithm responds to feedback from the generated text.", | ||
"range": "0.0-1.0" | ||
}, | ||
"num_keep": { | ||
"value": 0, | ||
"description": "Number of tokens to keep unchanged at the beginning of the generated text.", | ||
"range": "Integer value" | ||
}, | ||
"penalize_newline": { | ||
"value": true, | ||
"description": "Whether to penalize the generation of new lines.", | ||
"range": "Boolean value" | ||
}, | ||
"stop": { | ||
"value": [], | ||
"description": "When this pattern is encountered the LLM will stop generating text and return.", | ||
"range": "Array of strings" | ||
}, | ||
"seed": { | ||
"value": -1, | ||
"description": "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.", | ||
"range": "Integer value" | ||
} | ||
} | ||
} |
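
For reference, a minimal sketch of how the combined file might be consumed. The file name "settings.json", the helper names, and the example usage are illustrative assumptions rather than code from this repository; only the nested "parameters" schema is taken from the diff above. Since each entry carries its "description" and "range" as documentation, a consumer only needs the "value" fields:

import json

def load_settings(path="settings.json"):
    # Assumed file name: read the combined settings file shown in the diff.
    with open(path, encoding="utf-8") as f:
        return json.load(f)

def flatten_parameters(settings):
    # Collapse each {"value", "description", "range"} entry to its bare value,
    # yielding a flat options mapping such as {"num_ctx": 4096, "temperature": 0.8, ...}.
    return {name: spec["value"] for name, spec in settings["parameters"].items()}

settings = load_settings()
options = flatten_parameters(settings)
print(settings["host"])    # http://localhost:11434
print(options["num_ctx"])  # 4096

Keeping the metadata beside each value lets a settings UI show the description and valid range without a separate lookup table, which is plausibly the point of combining the two files.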