initial commit
Browse files- Dockerfile +11 -0
- README.md +6 -7
- app.py +189 -0
- index.html +37 -0
- requirements.txt +5 -0
Dockerfile
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Python base image for the FastAPI app.
FROM python:3.9

WORKDIR /code

# Copy and install dependencies first so Docker layer caching skips the
# pip install when only application code changes.
COPY ./requirements.txt /code/requirements.txt

RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

COPY . .

# 7860 is the port Hugging Face Spaces expects a Docker Space to serve on.
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
|
README.md
CHANGED
@@ -1,12 +1,11 @@
|
|
1 |
---
|
2 |
-
title:
|
3 |
-
emoji:
|
4 |
-
colorFrom:
|
5 |
-
colorTo:
|
6 |
-
sdk:
|
7 |
-
sdk_version: 3.29.0
|
8 |
-
app_file: app.py
|
9 |
pinned: false
|
|
|
10 |
---
|
11 |
|
12 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
1 |
---
|
2 |
+
title: Fastapi Hello World
|
3 |
+
emoji: ๐
|
4 |
+
colorFrom: green
|
5 |
+
colorTo: green
|
6 |
+
sdk: docker
|
|
|
|
|
7 |
pinned: false
|
8 |
+
duplicated_from: souljoy/my_api
|
9 |
---
|
10 |
|
11 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
import os
import time

import openai
import requests
from fastapi import FastAPI, status
from fastapi.responses import HTMLResponse, JSONResponse, StreamingResponse
from pydantic import BaseModel
|
9 |
+
|
10 |
+
|
11 |
+
class Text(BaseModel):
    """Generic request body for every endpoint: one free-form string payload.

    Several endpoints pack a JSON document into ``content`` and parse it
    themselves with ``json.loads``.
    """
    # Raw request text; defaults to the empty string.
    content: str = ""
|
13 |
+
|
14 |
+
|
15 |
+
app = FastAPI()

# SECURITY FIX: the previous revision committed a live OpenAI API key in
# source control. That key is public and must be revoked. Read the key from
# the environment instead (set OPENAI_API_KEY as a host/Space secret).
key = os.environ.get('OPENAI_API_KEY', '')
openai.api_key = key
# Shared headers for direct HTTP calls to the OpenAI REST API.
headers = {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer ' + key
}
|
22 |
+
|
23 |
+
|
24 |
+
@app.get("/")
def home():
    """Serve the static landing page.

    Returns:
        HTMLResponse: the contents of ``index.html`` with status 200.
    """
    # Use a context manager so the file handle is closed even if read() fails
    # (the original leaked the handle), and pin the encoding instead of
    # depending on the container locale.
    with open('index.html', encoding='utf-8') as fp:
        html_content = fp.read()
    return HTMLResponse(content=html_content, status_code=200)
|
28 |
+
|
29 |
+
|
30 |
+
@app.post("/qa_maker")
def sentiment_analysis_ep(content: Text = None):
    """Generate question/answer pairs from an article via gpt-3.5-turbo.

    NOTE(review): despite the historical name, this endpoint builds QA pairs
    (one JSON object per line) from the article text in ``content.content``.

    Returns:
        JSONResponse: ``{"content": <model reply text>}``.
    """
    url = 'https://api.openai.com/v1/chat/completions'
    prompt = 'ๆ นๆฎไธ้ข็ๆ็ซ ๏ผ็ๆ็โ้ฎ้ขๅๅ็ญโQAๅฏน๏ผๅคงไบ5ไธช๏ผไปฅไธ่กไธไธชjsonๆ ผๅผ๏ผ{โquestionโ:"xxx","answer":"xxx"}๏ผ็ๆ๏ผ\n'
    messages = [{"role": "user", "content": prompt + content.content}]
    data = {
        "model": "gpt-3.5-turbo",
        "messages": messages
    }
    print("messages = \n", messages)
    # timeout= keeps a stalled upstream connection from blocking the worker
    # forever (the original request had no timeout at all).
    result = requests.post(url=url,
                           data=json.dumps(data),
                           headers=headers,
                           timeout=120
                           )
    res = str(result.json()['choices'][0]['message']['content']).strip()
    print('res:', res)
    res = {'content': res}
    return JSONResponse(content=res)
|
48 |
+
|
49 |
+
|
50 |
+
@app.post("/chatpdf")
def chat_pdf_ep(content: Text = None):
    """Answer a question grounded in an uploaded document ("chat with PDF").

    ``content.content`` is a JSON string with keys:
        doc      -- document text used as grounding context
        history  -- list of [user_text, assistant_text] turn pairs
        question -- the new user question

    Returns:
        JSONResponse: ``{"content": <model reply text>}``.
    """
    url = 'https://api.openai.com/v1/chat/completions'
    messages = [
        {
            "role": "system",
            "content": "ไฝ ๆฏไธไธชๆ็จ็ๅฉๆ๏ผๅฏไปฅไฝฟ็จๆ็ซ ๅๅฎนๅ็กฎๅฐๅ็ญ้ฎ้ขใไฝฟ็จๆไพ็ๆ็ซ ๆฅ็ๆไฝ ็็ญๆก๏ผไฝ้ฟๅ้ๅญๅคๅถๆ็ซ ใๅฐฝๅฏ่ฝไฝฟ็จ่ชๅทฑ็่ฏใๅ็กฎใๆ็จใ็ฎๆดใๆธๆฐใ"
        }
    ]
    obj = json.loads(content.content)
    # Ground the model on the document, then replay the chat history so the
    # model sees the full conversation before the new question.
    messages.append({"role": "system", "content": "ๆ็ซ ๅๅฎน๏ผ\n" + obj['doc']})
    history = obj['history']
    for his in history:
        messages.append({"role": "user", "content": his[0]})
        messages.append({"role": "assistant", "content": his[1]})
    messages.append({"role": "user", "content": obj['question']})
    data = {
        "model": "gpt-3.5-turbo",
        "messages": messages
    }
    print("messages = \n", messages)
    # timeout= keeps a stalled upstream connection from hanging the worker.
    result = requests.post(url=url,
                           data=json.dumps(data),
                           headers=headers,
                           timeout=120
                           )
    res = str(result.json()['choices'][0]['message']['content']).strip()
    content = {'content': res}
    print('content:', content)
    return JSONResponse(content=content)
|
79 |
+
|
80 |
+
|
81 |
+
@app.post("/sale")
def sale_ep(content: Text = None):
    """Role-played car-sales chat grounded in a reference document.

    Same payload contract as ``/chatpdf``: ``content.content`` is a JSON
    string with ``doc``, ``history`` and ``question`` keys; only the system
    persona differs (a car salesperson).

    Returns:
        JSONResponse: ``{"content": <model reply text>}``.
    """
    url = 'https://api.openai.com/v1/chat/completions'
    messages = [
        {
            "role": "system",
            "content": "ไฝ ๆฏๅฐ้นๆฑฝ่ฝฆ่ตๆทฑ็้ๅฎ๏ผๅฏไปฅๅ่ๆ็ซ ๅๅฎนๆฅ็ๆไฝ ็็ญๆก๏ผไฝ้ฟๅ้ๅญๅคๅถๆ็ซ ๏ผๅฐฝๅฏ่ฝไฝฟ็จ่ชๅทฑ็่ฏ๏ผ็ชๅบๅฐ้นๆฑฝ่ฝฆ็ไผ็นใ่ฏดๆ็จๆทๆฅๅบ่ฏ้ฉพๆ่ดญไนฐๅฐ้นๆฑฝ่ฝฆใๅ็กฎใๆ็จใๆ้ๅฎๆๅทงใๅฃ่ฏญๅใๆไบฒๅๅใ"
        }
    ]
    obj = json.loads(content.content)
    # Same grounding-then-history pattern as /chatpdf.
    messages.append({"role": "system", "content": "ๆ็ซ ๅๅฎน๏ผ\n" + obj['doc']})
    history = obj['history']
    for his in history:
        messages.append({"role": "user", "content": his[0]})
        messages.append({"role": "assistant", "content": his[1]})
    messages.append({"role": "user", "content": obj['question']})
    data = {
        "model": "gpt-3.5-turbo",
        "messages": messages
    }
    print("messages = \n", messages)
    # timeout= keeps a stalled upstream connection from hanging the worker.
    result = requests.post(url=url,
                           data=json.dumps(data),
                           headers=headers,
                           timeout=120
                           )
    res = str(result.json()['choices'][0]['message']['content']).strip()
    content = {'content': res}
    print('content:', content)
    return JSONResponse(content=content)
|
110 |
+
|
111 |
+
|
112 |
+
@app.post("/chatgpt")
def chat_gpt_ep(content: Text = None):
    """Plain chat-completion proxy.

    ``content.content`` is a JSON string with a ``messages`` key in the
    OpenAI chat format; it is forwarded verbatim to gpt-3.5-turbo.

    Returns:
        JSONResponse: ``{"content": <model reply text>}``.
    """
    url = 'https://api.openai.com/v1/chat/completions'
    obj = json.loads(content.content)
    data = {
        "model": "gpt-3.5-turbo",
        "messages": obj['messages']
    }
    print("data = \n", data)
    # timeout= keeps a stalled upstream connection from hanging the worker.
    result = requests.post(url=url,
                           data=json.dumps(data),
                           headers=headers,
                           timeout=120
                           )
    res = str(result.json()['choices'][0]['message']['content']).strip()
    content = {'content': res}
    print('content:', content)
    return JSONResponse(content=content)
|
129 |
+
|
130 |
+
|
131 |
+
async def chat_gpt_stream_fun(content: Text = None):
    """Consume a streamed chat completion and yield the assembled reply.

    NOTE(review): although the upstream call sets ``stream=True``, this
    generator collects every chunk first and yields exactly ONCE at the end,
    so the client receives no incremental output — presumably intended to be
    chunk-by-chunk; confirm before changing.

    ``content.content`` is a JSON string with a ``messages`` key (OpenAI chat
    format). Yields a single JSON line: ``{"content": <full reply>}``.
    """
    start_time = time.time()
    obj = json.loads(content.content)
    # Legacy pre-1.0 openai SDK streaming interface.
    response = openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=obj['messages'],
        stream=True,  # this time, we set stream=True
    )
    # create variables to collect the stream of chunks
    collected_chunks = []
    collected_messages = []
    # iterate through the stream of events
    for chunk in response:
        chunk_time = time.time() - start_time  # calculate the time delay of the chunk
        collected_chunks.append(chunk)  # save the event response
        chunk_message = chunk['choices'][0]['delta']  # extract the message
        collected_messages.append(chunk_message)  # save the message
        print(f"Message received {chunk_time:.2f} seconds after request: {chunk_message}")  # print the delay and text
    # Deltas may omit 'content' (e.g. the role-only first delta), hence .get.
    full_reply_content = ''.join([m.get('content', '') for m in collected_messages])
    print(f"Full conversation received: {full_reply_content}")
    content = {'content': full_reply_content}
    print('content:', content)
    yield json.dumps(content) + '\n'
|
154 |
+
|
155 |
+
|
156 |
+
@app.post("/chatgptstream", status_code=status.HTTP_200_OK)
async def get_random_numbers(content: Text = None):
    # NOTE(review): the name is a leftover from a streaming tutorial — this
    # handler actually serves the /chatgptstream chat endpoint. Renaming is
    # safe for HTTP clients (the route comes from the decorator) but is left
    # untouched here in case the symbol is imported elsewhere.
    # Wraps the async generator so the reply is sent as a streamed JSON body.
    return StreamingResponse(chat_gpt_stream_fun(content), media_type='application/json')
|
159 |
+
|
160 |
+
|
161 |
+
@app.post("/embeddings")
def embeddings_ep(content: Text = None):
    """Proxy to the OpenAI embeddings API for the raw text in ``content.content``.

    Returns:
        JSONResponse: the upstream ``/v1/embeddings`` JSON payload, unmodified.
    """
    url = 'https://api.openai.com/v1/embeddings'
    data = {
        "model": "text-embedding-ada-002",
        "input": content.content
    }
    # timeout= keeps a stalled upstream connection from hanging the worker.
    result = requests.post(url=url,
                           data=json.dumps(data),
                           headers=headers,
                           timeout=120
                           )
    return JSONResponse(content=result.json())
|
173 |
+
|
174 |
+
|
175 |
+
@app.post("/create_image")
def create_image_ep(content: Text = None):
    """Proxy to the OpenAI image-generation API.

    ``content.content`` is a JSON string with ``prompt``, ``n`` and ``size``
    keys, forwarded to ``/v1/images/generations``.

    Returns:
        JSONResponse: the upstream JSON payload, unmodified.
    """
    url = 'https://api.openai.com/v1/images/generations'
    obj = json.loads(content.content)
    data = {
        "prompt": obj["prompt"],
        "n": obj["n"],
        "size": obj["size"]
    }
    print("data = \n", data)
    # timeout= keeps a stalled upstream connection from hanging the worker.
    result = requests.post(url=url,
                           data=json.dumps(data),
                           headers=headers,
                           timeout=120
                           )
    return JSONResponse(content=result.json())
|
index.html
ADDED
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<html>
<!-- Title -->
<head>
    <title>FastAPI Hello World</title>
</head>

<!-- Stylesheet -->
<style>
    body {
        font-family: Arial, Helvetica, sans-serif;
        font-size: 16px;
        line-height: 1.5;
        margin: 0;
        padding: 0;
    }
    h1 {
        font-size: 2em;
        margin: 0;
        padding: 0;
        /* Center */
        text-align: center;
    }
    h3 {
        margin: 0;
        padding: 0;
        /* Center */
        text-align: center;
    }

</style>

<!-- Body (intentionally empty landing page served by GET /) -->
<body>


</body>
</html>
|
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# NOTE(review): flask appears unused — app.py serves everything via FastAPI
# and uvicorn; confirm nothing else imports it before removing.
flask
fastapi==0.74.*
requests==2.27.*
uvicorn[standard]==0.17.*
openai
|