import gevent.pywsgi
from gevent import monkey

# Patch the standard library before Flask/requests are imported so that
# blocking network calls cooperate with gevent's event loop.
monkey.patch_all()

from flask import Flask, request, Response, jsonify
import argparse
import requests
import random
import string
import time
import json
import os

app = Flask(__name__)
app.json.sort_keys = False

parser = argparse.ArgumentParser(description="An example Qwen demo proxy with an OpenAI-compatible API.")
parser.add_argument("--host", type=str, help="IP address to bind (default: 0.0.0.0).", default="0.0.0.0")
parser.add_argument("--port", type=int, help="Port to listen on (default: 7860).", default=7860)
args = parser.parse_args()

# Base URL of the upstream Gradio Space, taken from the environment.
base_url = os.getenv("MODEL_BASE_URL")
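
# This app exposes OpenAI-style endpoints (/v1/models and /v1/chat/completions,
# also under the /api prefix) and forwards chat requests to a Gradio-hosted
# Qwen demo through its queue API, re-emitting the Gradio events as an
# OpenAI-style SSE stream.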

@app.route('/api/v1/models', methods=["GET", "POST"])
@app.route('/v1/models', methods=["GET", "POST"])
def model_list():
    # Minimal model listing so OpenAI clients can discover a model id.
    time_now = int(time.time())
    model_list = {
        "object": "list",
        "data": [
            {
                "id": "qwen",
                "object": "model",
                "created": time_now,
                "owned_by": "tastypear"
            },
            {
                "id": "gpt-3.5-turbo",
                "object": "model",
                "created": time_now,
                "owned_by": "tastypear"
            }
        ]
    }
    return jsonify(model_list)

@app.route("/", methods=["GET"])
def index():
    return Response(
        'QW1_5 OpenAI Compatible API<br><br>' +
        f'Set "{os.getenv("SPACE_URL")}/api" as proxy (or API Domain) in your Chatbot.<br><br>' +
        f'The complete API is: {os.getenv("SPACE_URL")}/api/v1/chat/completions')
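
# The handler below converts an OpenAI-style request body into the argument
# list this proxy sends to the Gradio demo ([prompt, chat_history, system] at
# fn_index 0), joins the Space's queue, and relays the queue's server-sent
# events back to the client as OpenAI chat.completion.chunk objects.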
@app.route("/api/v1/chat/completions", methods=["POST", "OPTIONS"])
@app.route("/v1/chat/completions", methods=["POST", "OPTIONS"])
def chat_completions():
    if request.method == "OPTIONS":
        # CORS preflight.
        return Response(
            headers={
                "Access-Control-Allow-Origin": "*",
                "Access-Control-Allow-Headers": "*",
            }
        )
    data = request.get_json()

    # Reorganize the OpenAI message list into (prompt, chat_history, system).
    system = "You are a helpful assistant."
    chat_history = []
    prompt = ""
    if "messages" in data:
        messages = data["messages"]
        message_size = len(messages)
        prompt = messages[-1].get("content")
        for i in range(message_size - 1):
            role_this = messages[i].get("role")
            role_next = messages[i + 1].get("role")
            if role_this == "system":
                system = messages[i].get("content")
            elif role_this == "user":
                if role_next == "assistant":
                    chat_history.append(
                        [messages[i].get("content"), messages[i + 1].get("content")]
                    )
                else:
                    chat_history.append([messages[i].get("content"), " "])
    # print(f'{system = }')
    # print(f'{chat_history = }')
    # print(f'{prompt = }')

    fn_index = 0
    # Generate a random 11-character session hash for the Gradio queue.
    chars = string.ascii_lowercase + string.digits
    session_hash = "".join(random.choice(chars) for _ in range(11))
    json_prompt = {
        "data": [prompt, chat_history, system],
        "fn_index": fn_index,
        "session_hash": session_hash,
    }

    def generate():
        # Join the queue, then stream this session's events back as SSE chunks.
        requests.post(f"{base_url}/queue/join", json=json_prompt)
        url = f"{base_url}/queue/data?session_hash={session_hash}"
        data = requests.get(url, stream=True)
        time_now = int(time.time())
        for line in data.iter_lines():
            if line:
                decoded_line = line.decode("utf-8")
                # Each event line looks like 'data: {...}'; strip the prefix.
                json_line = json.loads(decoded_line[6:])
                if json_line["msg"] == "process_starts":
                    res_data = gen_res_data({}, time_now=time_now, start=True)
                    yield f"data: {json.dumps(res_data)}\n\n"
                elif json_line["msg"] == "process_generating":
                    res_data = gen_res_data(json_line, time_now=time_now)
                    yield f"data: {json.dumps(res_data)}\n\n"
                elif json_line["msg"] == "process_completed":
                    yield "data: [DONE]"

    return Response(
        generate(),
        mimetype="text/event-stream",
        headers={
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Headers": "*",
        },
    )

def gen_res_data(data, time_now=0, start=False):
    # Build one OpenAI-style chat.completion.chunk from a Gradio queue event.
    res_data = {
        "id": "chatcmpl",
        "object": "chat.completion.chunk",
        "created": time_now,
        "model": "qwen1_5",
        "choices": [{"index": 0, "finish_reason": None}],
    }
    if start:
        res_data["choices"][0]["delta"] = {"role": "assistant", "content": ""}
    else:
        chat_pair = data["output"]["data"][1]
        if chat_pair == []:
            res_data["choices"][0]["finish_reason"] = "stop"
        else:
            # The second element of the last [user, assistant] pair carries
            # the assistant text for this event.
            res_data["choices"][0]["delta"] = {"content": chat_pair[-1][-1]}
    return res_data
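
# For reference, a generated chunk serialises to a single SSE line roughly like
# the following (timestamp and content are illustrative):
#
#   data: {"id": "chatcmpl", "object": "chat.completion.chunk",
#          "created": 1700000000, "model": "qwen1_5",
#          "choices": [{"index": 0, "finish_reason": null,
#                       "delta": {"content": "Hello"}}]}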

if __name__ == "__main__":
    # app.run(host=args.host, port=args.port, debug=True)
    gevent.pywsgi.WSGIServer((args.host, args.port), app).serve_forever()
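
# Example request (hypothetical host value, assuming MODEL_BASE_URL is set and
# the server is running on the default port). Only "messages" is read from the
# body; the reply is always streamed as text/event-stream and ends with
# "data: [DONE]":
#
#   curl -N http://localhost:7860/v1/chat/completions \
#        -H "Content-Type: application/json" \
#        -d '{"messages": [{"role": "user", "content": "Hi"}]}'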