## How to use
### Free API Service (hosted by Siam.AI and Float16.cloud)

#### Siam.AI
The example below sends a ChatML-formatted prompt to the free completions endpoint (system message: "You are a smart and honest question-answering assistant"; user message: "What is Bangkok?").
```bash
curl https://api.aieat.or.th/v1/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer dummy" \
  -d '{
    "model": ".",
    "prompt": "<|im_start|>system\nคุณคือผู้ช่วยตอบคำถามที่ฉลาดและซื่อสัตย์<|im_end|>\n<|im_start|>user\nกรุงเทพมหานครคืออะไร<|im_end|>\n<|im_start|>assistant\n",
    "max_tokens": 512,
    "temperature": 0.7,
    "top_p": 0.8,
    "top_k": 40,
    "stop": ["<|im_end|>"]
  }'
```
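
If you prefer Python, the same request can be reproduced with the `requests` package. This is a minimal sketch assuming the service returns the standard OpenAI completions schema (`choices[0].text`), as the curl example above suggests.
```python
import requests

# Siam.AI free endpoint from the curl example above
API_URL = "https://api.aieat.or.th/v1/completions"

# ChatML prompt: system "You are a smart and honest question-answering
# assistant", user "What is Bangkok?"
prompt = (
    "<|im_start|>system\nคุณคือผู้ช่วยตอบคำถามที่ฉลาดและซื่อสัตย์<|im_end|>\n"
    "<|im_start|>user\nกรุงเทพมหานครคืออะไร<|im_end|>\n"
    "<|im_start|>assistant\n"
)

response = requests.post(
    API_URL,
    headers={"Authorization": "Bearer dummy"},  # the free service accepts a dummy key
    json={
        "model": ".",
        "prompt": prompt,
        "max_tokens": 512,
        "temperature": 0.7,
        "top_p": 0.8,
        "top_k": 40,
        "stop": ["<|im_end|>"],
    },
)
response.raise_for_status()
print(response.json()["choices"][0]["text"])
```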

#### Float16
This endpoint takes plain chat messages (system: "You are a smart and honest question-answering assistant"; user: "Hello"), so you do not need to build the ChatML prompt yourself.
```bash
curl -X POST https://api.float16.cloud/dedicate/78y8fJLuzE/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer float16-AG0F8yNce5s1DiXm1ujcNrTaZquEdaikLwhZBRhyZQNeS7Dv0X" \
  -d '{
    "model": "openthaigpt/openthaigpt1.5-7b-instruct",
    "messages": [
      {
        "role": "system",
        "content": "คุณคือผู้ช่วยตอบคำถามที่ฉลาดและซื่อสัตย์"
      },
      {
        "role": "user",
        "content": "สวัสดี"
      }
    ]
  }'
```
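
Because the Float16 endpoint speaks the OpenAI chat-completions protocol, the official `openai` Python package (v1.x) should also work by overriding `base_url`. A minimal sketch, reusing the URL and key from the curl example above:
```python
from openai import OpenAI

# Point the client at the Float16 dedicated endpoint
client = OpenAI(
    base_url="https://api.float16.cloud/dedicate/78y8fJLuzE/v1",
    api_key="float16-AG0F8yNce5s1DiXm1ujcNrTaZquEdaikLwhZBRhyZQNeS7Dv0X",
)

response = client.chat.completions.create(
    model="openthaigpt/openthaigpt1.5-7b-instruct",
    messages=[
        # "You are a smart and honest question-answering assistant"
        {"role": "system", "content": "คุณคือผู้ช่วยตอบคำถามที่ฉลาดและซื่อสัตย์"},
        # "Hello"
        {"role": "user", "content": "สวัสดี"},
    ],
)
print(response.choices[0].message.content)
```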

### OpenAI Client Library (hosted by vLLM; see the vLLM section below)
The snippet below uses the legacy `openai` 0.x interface against a local vLLM server; the ChatML prompt carries a "smart and honest assistant" system message and asks, in Thai, "What is Bangkok?".
```python
import openai

# Configure the OpenAI client to use a local vLLM server
openai.api_base = "http://127.0.0.1:8000/v1"
openai.api_key = "dummy"  # vLLM doesn't require a real API key

# ChatML prompt: system "You are a smart and honest question-answering
# assistant", user "What is Bangkok?"
prompt = "<|im_start|>system\nคุณคือผู้ช่วยตอบคำถามที่ฉลาดและซื่อสัตย์<|im_end|>\n<|im_start|>user\nกรุงเทพมหานครคืออะไร<|im_end|>\n<|im_start|>assistant\n"

try:
    response = openai.Completion.create(
        model=".",  # model name as exposed by the vLLM server
        prompt=prompt,
        max_tokens=512,
        temperature=0.7,
        top_p=0.8,
        top_k=40,  # vLLM-specific sampling parameter, passed through by openai 0.x
        stop=["<|im_end|>"],
    )
    print("Generated Text:", response.choices[0].text)
except Exception as e:
    print("Error:", str(e))
```
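
The module-level `api_base` / `Completion.create` interface above was removed in `openai` v1.0. A sketch of the equivalent call for `openai>=1.0`, assuming the same local vLLM server; `top_k` is not part of the standard v1 signature, so it is passed through `extra_body`:
```python
from openai import OpenAI

client = OpenAI(
    base_url="http://127.0.0.1:8000/v1",
    api_key="dummy",  # vLLM does not require a real API key
)

# Same ChatML prompt as in the legacy example above
prompt = (
    "<|im_start|>system\nคุณคือผู้ช่วยตอบคำถามที่ฉลาดและซื่อสัตย์<|im_end|>\n"
    "<|im_start|>user\nกรุงเทพมหานครคืออะไร<|im_end|>\n<|im_start|>assistant\n"
)

response = client.completions.create(
    model=".",
    prompt=prompt,
    max_tokens=512,
    temperature=0.7,
    top_p=0.8,
    stop=["<|im_end|>"],
    extra_body={"top_k": 40},  # vLLM-specific parameter, sent in the request body
)
print("Generated Text:", response.choices[0].text)
```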

### Huggingface
```python
from transformers import AutoModelForCausalLM, AutoTokenizer