# NOTE: "Spaces: Sleeping" header below is a Hugging Face Spaces page artifact
# captured during extraction — it is not part of the program.
import openai
import os
from dotenv import load_dotenv, dotenv_values
from fastapi import FastAPI, Request
from pydantic import BaseModel
from fastapi.responses import RedirectResponse, HTMLResponse
import urllib.parse
import requests

# Load environment variables (assuming your API key is stored in a `.env` file)
load_dotenv()
# NOTE(review): despite the name, this is a Hugging Face token, not an OpenAI
# key — the OpenAI client is pointed at the HF inference endpoint below.
api_key = os.environ.get('HUGGINGFACEHUB_API_TOKEN')

# OpenAI API configuration (specific to Meta-Llama-3-8B)
# Model and base URL for the OpenAI-compatible Hugging Face inference API.
model_link = "meta-llama/Meta-Llama-3-8B-Instruct"
base_url = "https://api-inference.huggingface.co/v1"

# FastAPI application instance; route handlers are registered against it.
app = FastAPI()
class Message(BaseModel):
    """Request payload schema: a single chat message from the user."""

    # The raw user text to be grammar-checked.
    message: str
@app.get("/")
async def read_root():
    """Landing endpoint returning a plain welcome banner.

    NOTE(review): the extracted source had no route decorator, so this
    handler was never registered with FastAPI; ``@app.get("/")`` restores
    registration at the root path — confirm the intended route.

    Returns:
        A welcome banner string.
    """
    return """Welcome to Up to 12 Chat Processor"""
@app.post("/")
async def receive_updates(request: Request):
    """Accept a JSON update and return the grammar-correction result.

    NOTE(review): the extracted source had no route decorator, so this
    handler was never registered with FastAPI; ``@app.post("/")`` restores
    registration — confirm the intended route path.

    Args:
        request: Incoming HTTP request whose JSON body is expected to
            contain a 'message' key holding the user's text.

    Returns:
        The dict produced by process_text(), or an error dict when the
        payload lacks a 'message' key.
    """
    data = await request.json()
    print("Received Update:")
    print(data)
    # Guard against malformed payloads instead of raising KeyError.
    if 'message' not in data:
        return {"error": "missing 'message' field in payload"}
    response = process_text(data['message'])
    print(f"Assistant: {response}")
    return response
# def process_text(user_text): | |
# """ | |
# Processes user text using the Meta-Llama-3-8B model and returns the response. | |
# Args: | |
# user_text: The text entered by the user. | |
# Returns: | |
# The response generated by the model. | |
# """ | |
# # Initialize OpenAI client | |
# client = openai.OpenAI(api_key=api_key, base_url=base_url) | |
# try: | |
# # Generate response using OpenAI chat completion API | |
# response = client.chat.completions.create( | |
# model=model_link, | |
# messages=[{"role": "user", "content": user_text}], | |
# max_tokens=3000, | |
# temperature=0.5, # Adjust temperature for desired response randomness | |
# stream=False # Disable streaming for function use | |
# ) | |
# # Handle potential changes in response format | |
# if isinstance(response.choices, list): | |
# # Access response text if choices is a list (likely scenario) | |
# return response.choices[0].message.content.strip() | |
# else: | |
# # Handle potential alternative response format (less likely) | |
# return response.content.strip() if hasattr(response, 'content') else "An error occurred." | |
# except Exception as e: | |
# print(f"Error occurred: {e}") | |
# return "An error occurred while processing your request." | |
def process_text(user_text):
    """
    Processes user text using the Meta-Llama-3-8B model and returns the correction response.

    Stage 1 asks the model for a corrected version of the text; stage 2 asks
    it to list the mistakes found between the original and the correction.

    Args:
        user_text: The text entered by the user.

    Returns:
        On success: {"Corrected_text": <revised text>, "Mistakes": <list of mistakes>}.
        On failure: {"error": <error message>, "When": <which stage failed>}.
    """
    # Initialize OpenAI-compatible client pointed at the HF inference API.
    client = openai.OpenAI(api_key=api_key, base_url=base_url)

    def _complete(instructions, temperature):
        """One chat-completion round trip; returns the stripped reply text."""
        # The chat completions API only accepts the roles system/user/assistant;
        # the original code sent a non-standard "grammar_corrector" role, which
        # the endpoint rejects. The instruction prompt therefore goes in a
        # system message, placed before the user's text.
        response = client.chat.completions.create(
            model=model_link,
            messages=[
                {"role": "system", "content": instructions},
                {"role": "user", "content": user_text},
            ],
            max_tokens=3000,
            temperature=temperature,  # low values keep corrections deterministic
            stream=False,  # Disable streaming for function use
        )
        return response.choices[0].message.content.strip()

    corrected_text = ""
    try:
        prompt = f"""
        Please correct the grammar, spelling and literal translation mistakes in "{user_text}". You will help A1, A2 CEFR students mostly. So, Focus on structure, self introduction, words meanings. Revise the text manytimes. Reply only with the revised text in the following format:
        <Revised text>
        If the {user_text} is only a set of words (not sentences) then check only spelling mistakes in this case
        Generally Don't reply with mistakes report
        """
        corrected_text = _complete(prompt, 0.1)
        print("After correction: ", corrected_text)
        print("--------------------------------------")
    except Exception as e:
        print(f"Error occurred: {e}")
        # str(e): exception objects are not JSON-serializable in a response body.
        return {"error": str(e), "When": "During correcting sentences"}

    try:
        prompt = f"""
        You are a grammar checker bot. Compare the user text "{user_text}" to the corrected text "{corrected_text}" and return every single corrected mistakes between the user text and the corrected text. Reply only with the mistakes in the following format:
        <list of mistakes>
        Don't reply with corrected text
        """
        mistakes = _complete(prompt, 0)
        print("Corrected text is: ", corrected_text)
        print("Mistakes are: ", mistakes)
        return {"Corrected_text": corrected_text, "Mistakes": mistakes}
    except Exception as e:
        print(f"Error occurred: {e}")
        return {"error": str(e), "When": "During checking mistakes"}