gadkins committed • Commit 9abbe21 • Parent(s): 83e7855

Add app
Files changed:
- .gitignore +8 -0
- Dockerfile +11 -0
- main.py +17 -0
- request.py +27 -0
- requirements.txt +5 -0
- utils.py +47 -0
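Overview: the commit packages a small FastAPI service. main.py exposes a /chat endpoint, utils.py forwards queries to an OpenAI-compatible model server configured via a .env file, request.py is a local test client, and the Dockerfile containerizes the app for the Space on port 7860.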
.gitignore ADDED
@@ -0,0 +1,8 @@
+api_calls/.env
+api_calls/apiEnv
+.env
+*env
+*venv
+__pycache__/
+*pdfChatEnv
+*checkpoint.ipynb
Dockerfile ADDED
@@ -0,0 +1,11 @@
+FROM python:3.9
+
+WORKDIR /code
+
+COPY ./requirements.txt /code/requirements.txt
+
+RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+COPY . .
+
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
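Note: main.py is copied to the /code root, so the uvicorn module path is main:app rather than app.main:app. The image is not built in this commit, but once built and started locally (for example with docker build -t pdf-chatbot . followed by docker run -p 7860:7860 pdf-chatbot; the image tag is a placeholder, not part of the commit), a quick Python sanity check against the container could look like this sketch:

    import requests

    # Query the root endpoint that main.py serves inside the container
    resp = requests.get("http://localhost:7860/")
    resp.raise_for_status()
    print(resp.json())  # expected: {"message": "PDF Chatbot API"}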
main.py ADDED
@@ -0,0 +1,17 @@
+from fastapi import FastAPI
+from pydantic import BaseModel
+from utils import chat_completion_request
+
+app = FastAPI()
+
+class ChatRequest(BaseModel):
+    input_str: str
+
+@app.get("/")
+async def root():
+    return {"message": "PDF Chatbot API"}
+
+@app.post("/chat")
+async def submit_query(request: ChatRequest):
+    chat_response = chat_completion_request(request.input_str)
+    return {"role": "assistant", "content": chat_response}
request.py ADDED
@@ -0,0 +1,27 @@
+import requests
+import json
+
+# Local test client for the /chat endpoint defined in main.py
+url = 'http://127.0.0.1:8000/chat'
+headers = {
+    'accept': 'application/json',
+    'Content-Type': 'application/json',
+}
+data = {
+    "input_str": "What is this API for?"  # placeholder query
+}
+
+response = requests.post(url, headers=headers, data=json.dumps(data))
+
+print(response.json())
+# Check the status code of the response
+# if response.status_code == 200:
+#     try:
+#         print(response.json())
+#     except json.decoder.JSONDecodeError:
+#         print("Response is not in JSON format.")
+# else:
+#     print(f"Error: Received status code {response.status_code}")
+#     print("Response text:", response.text)
+
+
requirements.txt ADDED
@@ -0,0 +1,5 @@
+openai
+fastapi
+uvicorn
+requests
+python-dotenv
utils.py ADDED
@@ -0,0 +1,47 @@
+from openai import OpenAI
+import os
+import time
+from dotenv import dotenv_values
+
+# Load model and API endpoint from environment variables
+config = dotenv_values(".env")
+model = config.get("MODEL")
+api_endpoint = config.get("API_ENDPOINT")
+
+openai_api_base = api_endpoint + '/v1'
+
+# Initialize the OpenAI client
+client = OpenAI(
+    api_key="EMPTY",  # Replace with your actual API key if required
+    base_url=openai_api_base,
+)
+
+def chat_completion_request(input_str):
+
+    messages = [
+        {"role": "user", "content": f"{input_str}"},
+    ]
+    # Create a chat completion using the OpenAI client
+    chat_response = client.chat.completions.create(
+        model=model,
+        messages=messages,
+        temperature=0,
+        max_tokens=500
+    )
+
+    # Extract the completion text from the response
+    if chat_response.choices:
+        completion_text = chat_response.choices[0].message.content
+    else:
+        completion_text = None
+
+    return completion_text
+
+
+# # Test the function
+# messages = [
+#     {"role": "user", "content": "Write a long essay on the topic of spring."}
+# ]
+
+# chat_response = chat_completion_request("Write a long essay on the topic of spring.")
+# messages.append({"role": "assistant", "content": chat_response})
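Note: utils.py reads MODEL and API_ENDPOINT from a .env file that is git-ignored and therefore not part of this commit. A minimal usage sketch, assuming an OpenAI-compatible inference server is reachable (both values below are placeholders):

    # .env (placeholder values)
    #   MODEL=mistralai/Mistral-7B-Instruct-v0.2
    #   API_ENDPOINT=http://localhost:8080

    from utils import chat_completion_request

    # Single-turn query routed through the OpenAI-compatible endpoint
    reply = chat_completion_request("Summarize this API in one sentence.")
    print(reply)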