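# Per-model default parameters. Each top-level key is a regular expression
# matched against the model name; the parameters of every matching entry are
# applied automatically when that model is loaded. The `.*` entry below holds
# the fallback defaults for models that match nothing more specific.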
.*:
  wbits: 'None'
  model_type: 'None'
  groupsize: 'None'
  pre_layer: 0
  mode: 'cai-chat'
  skip_special_tokens: true
  custom_stopping_strings: ''
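# GPTQ-quantized models: infer wbits, model_type, and groupsize from naming
# conventions such as "-4bit-128g" or "-gr128-3bit".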
llama-[0-9]*b-4bit$:
  wbits: 4
  model_type: 'llama'
.*-(4bit|int4)-(gr128|128g):
  wbits: 4
  groupsize: 128
.*-(gr128|128g)-(4bit|int4):
  wbits: 4
  groupsize: 128
.*-3bit-(gr128|128g):
  wbits: 3
  groupsize: 128
.*-(gr128|128g)-3bit:
  wbits: 3
  groupsize: 128
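# Instruct-style models: per-family chat mode, instruction template,
# special-token handling, and custom stopping strings.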
.*oasst-sft-1-pythia-12b:
  mode: 'instruct'
  instruction_template: 'Open Assistant'
.*vicuna:
  mode: 'instruct'
  instruction_template: 'Vicuna-v0'
.*alpaca:
  mode: 'instruct'
  instruction_template: 'Alpaca'
.*alpaca-native-4bit:
  mode: 'instruct'
  instruction_template: 'Alpaca'
  wbits: 4
  groupsize: 128
.*(galactica|oasst):
  skip_special_tokens: false
.*dolly-v[0-9]-[0-9]*b:
  mode: 'instruct'
  instruction_template: 'Alpaca'
  skip_special_tokens: false
  custom_stopping_strings: '"### End"'
.*koala:
  mode: 'instruct'
  instruction_template: 'Koala'
.*chatglm:
  mode: 'instruct'
  instruction_template: 'ChatGLM'
.*llava:
  mode: 'instruct'
  model_type: 'llama'
  instruction_template: 'LLaVA'
  custom_stopping_strings: '"\n###"'
.*raven:
  mode: 'instruct'
  instruction_template: 'RWKV-Raven'