macadeliccc commited on
Commit
51f117f
1 Parent(s): ac8f820

Create README.md

Browse files
Files changed (1) hide show
  1. README.md +153 -0
README.md ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: Qwen/Qwen2-7B
3
+ datasets:
4
+ - macadeliccc/opus_samantha
5
+ - cognitivecomputations/ultrachat-uncensored
6
+ - teknium/OpenHermes-2.5
7
+ - Sao10K/Claude-3-Opus-Instruct-15K
8
+ license: apache-2.0
9
+ ---
10
+ # Samantha Qwen2 7B AWQ
11
+
12
 + Trained on 2x4090 using QLoRA and FSDP
13
+
14
 + + [LoRA](https://huggingface.co/macadeliccc/Samantha-Qwen2-7B-LoRa)
15
+
16
+ ## Launch Using VLLM
17
+
18
+ ```bash
19
+ python -m vllm.entrypoints.openai.api_server \
20
+ --model macadeliccc/Samantha-Qwen2-7B-AWQ \
21
+ --chat-template ./examples/template_chatml.jinja \
22
+ --quantization awq
23
+ ```
24
+
25
+ ```python
26
+ from openai import OpenAI
27
+ # Set OpenAI's API key and API base to use vLLM's API server.
28
+ openai_api_key = "EMPTY"
29
+ openai_api_base = "http://localhost:8000/v1"
30
+
31
+ client = OpenAI(
32
+ api_key=openai_api_key,
33
+ base_url=openai_api_base,
34
+ )
35
+
36
+ chat_response = client.chat.completions.create(
37
+ model="macadeliccc/Samantha-Qwen2-7B-AWQ",
38
+ messages=[
39
+ {"role": "system", "content": "You are a helpful assistant."},
40
+ {"role": "user", "content": "Tell me a joke."},
41
+ ]
42
+ )
43
+ print("Chat response:", chat_response)
44
+ ```
45
+
46
+ ## Prompt Template
47
+
48
+ ```
49
+ <|im_start|>system
50
+ You are a friendly assistant.<|im_end|>
51
+ <|im_start|>user
52
+ What is the capital of France?<|im_end|>
53
+ <|im_start|>assistant
54
+ The capital of France is Paris.
55
+ ```
56
+
57
+ ## Quants
58
+
59
+ + [AWQ](https://huggingface.co/macadeliccc/Samantha-Qwen2-7B-AWQ)
60
+
61
+
62
+ [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
63
+ <details><summary>See axolotl config</summary>
64
+
65
+ axolotl version: `0.4.0`
66
+ ```yaml
67
 + base_model: Qwen/Qwen2-7B
68
+ model_type: AutoModelForCausalLM
69
+ tokenizer_type: AutoTokenizer
70
+
71
+ trust_remote_code: true
72
+
73
+ load_in_8bit: false
74
+ load_in_4bit: true
75
+ strict: false
76
+
77
+ datasets:
78
+ - path: macadeliccc/opus_samantha
79
+ type: sharegpt
80
+ field: conversations
81
+ conversation: chatml
82
+ - path: uncensored-ultrachat.json
83
+ type: sharegpt
84
+ field: conversations
85
+ conversation: chatml
86
+ - path: openhermes_200k.json
87
+ type: sharegpt
88
+ field: conversations
89
+ conversation: chatml
90
+ - path: opus_instruct.json
91
+ type: sharegpt
92
+ field: conversations
93
+ conversation: chatml
94
+
95
+ chat_template: chatml
96
+ dataset_prepared_path:
97
+ val_set_size: 0.05
98
+ output_dir: ./outputs/lora-out
99
+
100
+ sequence_len: 2048
101
+ sample_packing: false
102
+ pad_to_sequence_len:
103
+
104
+ adapter: qlora
105
+ lora_model_dir:
106
+ lora_r: 32
107
+ lora_alpha: 16
108
+ lora_dropout: 0.05
109
+ lora_target_linear: true
110
+ lora_fan_in_fan_out:
111
+
112
+ wandb_project:
113
+ wandb_entity:
114
+ wandb_watch:
115
+ wandb_name:
116
+ wandb_log_model:
117
+
118
+ gradient_accumulation_steps: 4
119
+ micro_batch_size: 2
120
+ num_epochs: 1
121
+ optimizer: adamw_bnb_8bit
122
+ lr_scheduler: cosine
123
+ learning_rate: 0.0002
124
+
125
+ train_on_inputs: false
126
+ group_by_length: false
127
+ bf16: auto
128
+ fp16:
129
+ tf32: false
130
+
131
+ gradient_checkpointing: false
132
+ early_stopping_patience:
133
+ resume_from_checkpoint:
134
+ local_rank:
135
+ logging_steps: 1
136
+ xformers_attention:
137
+ flash_attention:
138
+
139
+ warmup_steps: 250
140
+ evals_per_epoch: 4
141
+ eval_table_size:
142
+ eval_max_new_tokens: 128
143
+ saves_per_epoch: 1
144
+ debug:
145
+ deepspeed:
146
+ weight_decay: 0.0
147
+ fsdp:
148
+ fsdp_config:
149
+ special_tokens:
150
+ ```
151
+
152
+ </details><br>
153
+