File size: 799 Bytes
b79d82a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
# Apply SentencePiece tokenization to inputs before decoding.
transforms: [sentencepiece]

#### Subword
# Same tokenizer model on source and target sides (shared vocabulary).
src_subword_model: "/mnt/InternalCrucial4/dataAI/mistral-7B/mistral-instruct-v0.2/tokenizer.model"
tgt_subword_model: "/mnt/InternalCrucial4/dataAI/mistral-7B/mistral-instruct-v0.2/tokenizer.model"

# Model info
# Checkpoint filename suggests a Mistral-7B-Instruct model converted for
# OpenNMT with AWQ quantization (gemm kernels) — confirm against how it was produced.
model: "/mnt/InternalCrucial4/dataAI/mistral-7B/mistral-instruct-v0.2/Mistral-7B-instruct-onmt-awq-gemm.pt"

# Inference
seed: 42  # fixed RNG seed for reproducible runs
max_length: 256  # cap on generated sequence length
gpu: 0  # device id of the GPU to run on
batch_type: sents  # batch size is counted in sentences, not tokens
batch_size: 60
world_size: 1  # single process (tensor-parallel settings below are disabled)
gpu_ranks: [0]
#parallel_mode: "tensor_parallel"
#quant_layers: ['w_1', 'w_2', 'w_3', 'linear_values', 'linear_query', 'linear_keys', 'final_linear']
#quant_type: "bnb_NF4"
precision: fp16  # half-precision inference
# Sampling options left disabled — decoding is deterministic with beam_size 1.
#random_sampling_topk: 1
#random_sampling_topp: 0.6
#random_sampling_temp: 0.9
beam_size: 1
n_best: 1  # return only the single best hypothesis
profile: false
report_time: true  # log timing statistics
# NOTE(review): a plain `None` parses as the YAML *string* "None", not null —
# presumably the consumer (looks like OpenNMT-py) accepts this convention; verify.
src: None
#tgt: None