Upload 2 files
Browse files
- input-vicuna.txt +1 -0
- mixtral-inference-awq.yaml +29 -0
input-vicuna.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
USER:⦅newline⦆Show me some attractions in Boston.⦅newline⦆⦅newline⦆ASSISTANT:⦅newline⦆
|
mixtral-inference-awq.yaml
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# OpenNMT-py inference configuration for a Mixtral model quantized with AWQ.
# Reconstructed from a garbled diff view: diff-gutter artifacts removed so the
# file parses as YAML; every key/value pair is preserved byte-for-byte.

transforms: [sentencepiece]

#### Subword
# Same SentencePiece model used for both source and target tokenization.
src_subword_model: "/mnt/InternalCrucial4/dataAI/mixtral/tokenizer.model"
tgt_subword_model: "/mnt/InternalCrucial4/dataAI/mixtral/tokenizer.model"

# Model info
model: "/mnt/InternalCrucial4/dataAI/mixtral/mixtral-onmt-awq.pt"

# Inference
seed: 42
max_length: 256
gpu: 0
batch_type: sents
batch_size: 1
# Two-GPU tensor-parallel execution (world_size / gpu_ranks / parallel_mode).
world_size: 2
gpu_ranks: [0, 1]
parallel_mode: "tensor_parallel"
precision: fp16
# Sampling options left disabled; greedy decoding via beam_size: 1 below.
#random_sampling_topk: 1
#random_sampling_topp: 0.6
#random_sampling_temp: 0.9
beam_size: 1
n_best: 1
profile: false
report_time: true
# NOTE(review): YAML parses `None` as the string "None", not null — looks like
# the consumer treats it as a "no file, read stdin/CLI" sentinel; confirm, and
# use `null` if a true YAML null is intended.
src: None
#tgt: None