SHMIS committed
Commit: 8d895e5
Parent: be0d060

Upload 2 files

Files changed (3)
  1. .gitattributes +1 -0
  2. models3.json +13 -23
  3. vicuna-13b-v1.5-16k.Q4_0.gguf +3 -0
.gitattributes CHANGED
@@ -42,3 +42,4 @@ phi3-4k-it-nvidia-chat-q4-GGUF.gguf filter=lfs diff=lfs merge=lfs -text
 phi-3-medium-4k-instruct-q4_0.gguf filter=lfs diff=lfs merge=lfs -text
 stablelm-2-12b-chat-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
 Meta-Llama-3-8B-Instruct.Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+vicuna-13b-v1.5-16k.Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
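The added attribute line routes the new GGUF weight file through Git LFS rather than storing the blob in the repository itself. As a rough illustration only (not part of this repo), a minimal Python sketch that scans a .gitattributes file like the one above for LFS-tracked patterns:

```python
# Hypothetical helper: list the path patterns routed through Git LFS.
def lfs_tracked_patterns(path: str = ".gitattributes") -> list[str]:
    patterns = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            parts = line.split()
            # An LFS rule looks like: <pattern> filter=lfs diff=lfs merge=lfs -text
            if parts and "filter=lfs" in parts[1:]:
                patterns.append(parts[0])
    return patterns

print(lfs_tracked_patterns())  # e.g. [..., "vicuna-13b-v1.5-16k.Q4_0.gguf"]
```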
models3.json CHANGED
@@ -1,11 +1,11 @@
 [
   {
-    "name": "StableLM 12B",
-    "filename": "stablelm-2-12b-chat-Q4_0.gguf",
-    "url": "https://huggingface.co/SHMIS/ABS_AI_Helper/resolve/main/stablelm-2-12b-chat-Q4_0.gguf?download=true",
-    "promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n",
-    "systemPrompt": "%1\n",
-    "stopWord": ["<|end|>"],
+    "name": "vicuna-13b-16k",
+    "filename": "vicuna-13b-v1.5-16k.Q4_0.gguf",
+    "url": "https://huggingface.co/TheBloke/vicuna-13B-v1.5-16K-GGUF/resolve/main/vicuna-13b-v1.5-16k.Q4_0.gguf?download=true",
+    "promptTemplate": "User:\n%1\nAssistant:\n%2\n",
+    "systemPrompt": "System:\n%1\n",
+    "stopWord": ["<s>","</s>"],
     "type": "model",
     "requireRam": 17
   },
@@ -20,14 +20,14 @@
     "requireRam": 17
   },
   {
-    "name": "vicuna-13b-16k",
-    "filename": "vicuna-13b-v1.5-16k.Q4_0.gguf",
-    "url": "https://huggingface.co/TheBloke/vicuna-13B-v1.5-16K-GGUF/resolve/main/vicuna-13b-v1.5-16k.Q4_0.gguf?download=true",
-    "promptTemplate": "User:\n%1\nAssistant:\n%2\n",
-    "systemPrompt": "System:\n%1\n",
-    "stopWord": ["<s>","</s>"],
+    "name": "Llama 3 8b",
+    "filename": "Meta-Llama-3-8B-Instruct.Q4_0.gguf",
+    "url": "https://huggingface.co/QuantFactory/Meta-Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct.Q4_0.gguf?download=true",
+    "promptTemplate": "<|start_header_id|>user<|end_header_id|>\n%1\n<|eot_id|>\n<|start_header_id|>assistant<|end_header_id|>\n%2\n<|eot_id|>\n",
+    "systemPrompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n%1\n<|eot_id|>\n",
+    "stopWord": ["<|eot_id|>"],
     "type": "model",
-    "requireRam": 17
+    "requireRam": 12
   },
   {
     "name": "Llama 3 8b neural",
@@ -48,16 +48,6 @@
     "stopWord": ["<|eot_id|>"],
     "type": "model",
     "requireRam": 12
-  },
-  {
-    "name": "Llama 3 8b",
-    "filename": "Meta-Llama-3-8B-Instruct.Q4_0.gguf",
-    "url": "https://huggingface.co/QuantFactory/Meta-Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct.Q4_0.gguf?download=true",
-    "promptTemplate": "<|start_header_id|>user<|end_header_id|>\n%1\n<|eot_id|>\n<|start_header_id|>assistant<|end_header_id|>\n%2\n<|eot_id|>\n",
-    "systemPrompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n%1\n<|eot_id|>\n",
-    "stopWord": ["<|eot_id|>"],
-    "type": "model",
-    "requireRam": 12
   },
   {
     "name": "QWen 2 7b",
vicuna-13b-v1.5-16k.Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:776a8dc92c9a1eaf2a5eb8fd5e88753bd438adf9c992df96ac514d49764fb2da
+size 7365834688
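The .gguf entry committed here is a Git LFS pointer file, not the weights themselves: three lines giving the spec version, the SHA-256 of the real blob, and its size in bytes (about 7.4 GB). A hedged sketch of checking a downloaded blob against such a pointer:

```python
import hashlib
import os

# Parse a Git LFS pointer file ("key value" per line) into a dict.
def read_pointer(path: str) -> dict[str, str]:
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# Verify size first (cheap), then the SHA-256 digest (streamed in 1 MiB chunks).
def verify(blob_path: str, pointer_path: str) -> bool:
    ptr = read_pointer(pointer_path)
    if os.path.getsize(blob_path) != int(ptr["size"]):
        return False
    h = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == ptr["oid"].removeprefix("sha256:")
```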