Fix llama3 context formatting, thanks to @SerialKicked
Prompts/LLAMA-3/v1.7/[LLAMA-3-Context]Roleplay-v1.7.json
CHANGED
@@ -1,5 +1,5 @@
 {
-    "story_string": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{{#if system}}{{system}}\n\n\n<!-- Start of Role-play Context -->\n\n
+    "story_string": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{{#if system}}{{system}}\n\n\n<!-- Start of Role-play Context -->\n\n{{/if}}{{#if scenario}}### Main Scenario\n{{scenario}}\n\n{{/if}}{{#if wiBefore}}### Extra Information\n{{wiBefore}}\n\n{{/if}}{{#if personality}}### {{char}}'s Persona\n{{personality}}\n\n{{/if}}{{#if persona}}### {{user}}'s Persona\n{{persona}}\n\n{{/if}}{{#if mesExamples}}### {{char}}'s Example Dialogue\nThe following examples illustrate {{char}}'s unique dialect and speaking quirks. Study them to gain a deeper understanding of their personality, but be sure to incorporate these traits in a way that feels authentic and nuanced, rather than simply copying them verbatim.\n\n<!-- Start of {{char}}'s Example Dialogue -->\n{{mesExamples}}\n<!-- End of {{char}}'s Example Dialogue -->\n\n{{/if}}{{#if description}}### Main Information\n{{description}}\n\n{{/if}}{{#if wiAfter}}### Extra Information\n{{wiAfter}}\n\n{{/if}}<!-- End of Role-play Context --><|eot_id|>",
     "example_separator": "",
     "chat_start": "",
     "use_stop_strings": false,
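In SillyTavern's story-string syntax, each {{#if field}}...{{/if}} block is emitted only when that field is filled, so unused sections drop out instead of leaving empty headers. As an illustration only, with a hypothetical system prompt and scenario and every other field left blank, the new story_string would render roughly as:

<|begin_of_text|><|start_header_id|>system<|end_header_id|>

You are the narrator of this role-play.


<!-- Start of Role-play Context -->

### Main Scenario
A quiet seaside town in late autumn.

<!-- End of Role-play Context --><|eot_id|>
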
Prompts/LLAMA-3/v1.7/[LLAMA-3-Instruct]Roleplay-v1.7.json
CHANGED
@@ -1,5 +1,5 @@
 {
-    "system_prompt": "A flexible narrative driven role-play featuring {{char}} and {{user}}. MUST strictly adhere to the Role-playing Guidelines. MUST refer to the Role-play Context for contextually accurate information.\n\n\n<!-- Start of Role-playing Guidelines -->\n\n
+    "system_prompt": "A flexible narrative driven role-play featuring {{char}} and {{user}}. MUST strictly adhere to the Role-playing Guidelines. MUST refer to the Role-play Context for contextually accurate information.\n\n\n<!-- Start of Role-playing Guidelines -->\n\n### Style\nPacing & Plot Progression: Ensure narratives flow smoothly with consistent pacing and seamless scene transitions.\nVocabulary & Syntax: Utilize varied nouns, verbs, and sentence structures, preferring active voice over passive voice.\nCharacter Profundity: Create realistic, multi-dimensional characters with genuine strengths, flaws, and relatable human qualities, transcending stereotypes for authentic portrayals.\nThematic Depth: When contextually relevant, explore complex, mature themes to add depth and realism to the narrative.\n\n### World-Building\nLore Adherence: Respect established world-building, refraining from contradictions, and building upon existing canon.\nInnovative Expansion: Integrate new world elements skillfully, providing context through narrative or dialogue, ensuring seamless continuity.\n\n### Character Embodiment\nAuthentic Embodiment: Emulate characters' traits, emotions, motivations, tastes, sensory experiences, etc.\nAppearance: Faithfully represent characters' visuals, ie. clothing, body type, etc.\nSpeech Consistency: Maintain character authenticity through distinct accurate depictions of their language and tone.\nThematic Alignment: Ensure characters respond to themes in ways true to their established personalities and traits.\nCharacter Interactions: Interactions MUST be contextually accurate.\nEnvironmental Interaction: Interact with and react to the environment.\n\n<!-- End of Role-playing Guidelines -->",
     "input_sequence": "<|start_header_id|>user<|end_header_id|>\n\n",
     "output_sequence": "<|start_header_id|>assistant<|end_header_id|>\n\n",
     "last_output_sequence": "",
@@ -21,4 +21,4 @@
     "system_same_as_user": false,
     "last_system_sequence": "",
     "name": "[LLAMA-3]Roleplay-v1.7"
-}
+}
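Together with the context preset above, these sequences line up with the stock Llama 3 Instruct chat template: story_string produces the system block, while input_sequence and output_sequence open each user and assistant turn. A rough sketch of one exchange, assuming the frontend closes each message with <|eot_id|> as in the standard template (message text is illustrative only):

<|begin_of_text|><|start_header_id|>system<|end_header_id|>

...system prompt and role-play context...<|eot_id|><|start_header_id|>user<|end_header_id|>

Hi there.<|eot_id|><|start_header_id|>assistant<|end_header_id|>
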
Scripts/kobold-server.sh
CHANGED
@@ -44,7 +44,7 @@ fi
 
 # lists models
 echo "Select Model"
-MODEL=$(gum choose --selected.bold --selected.underline $(ls $MODEL_FOLDER_DIR))
+MODEL=$(gum choose --height=30 --selected.bold --selected.underline $(ls $MODEL_FOLDER_DIR))
 # uncomment if you use sharded models. It will take the first file in a dir and load it.
 #SHARDED_MODEL=$(ls -p $MODEL_FOLDER_DIR/$MODEL | grep -v / | head -1)
 #MODEL=$MODEL/$SHARDED_MODEL
@@ -55,7 +55,7 @@ LAYERS=$(gum input --placeholder "99")
 echo "$LAYERS layers have been offloaded"
 
 echo "Context Size"
-CONTEXT=$(gum choose --selected.bold --selected.underline "4096" "8192" "12288" "16384" "32768")
+CONTEXT=$(gum choose --height=10 --selected.bold --selected.underline "4096" "8192" "12288" "16384" "32768")
 echo "Using a context size of $CONTEXT"
 
 # combined user flags
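The only functional change to the script is the --height flag on both gum choose calls, which caps how many rows the picker draws so long model lists scroll inside the menu instead of flooding the terminal. A minimal standalone sketch of the same pattern, using a hypothetical ~/models directory:

#!/usr/bin/env bash
# Hypothetical model directory; substitute your own path.
MODEL_FOLDER_DIR="$HOME/models"

# --height=30 caps the visible list at 30 rows; longer lists scroll.
MODEL=$(gum choose --height=30 --selected.bold --selected.underline $(ls "$MODEL_FOLDER_DIR"))
echo "Selected model: $MODEL"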