# decision-helper-bot / last_app.py
# (The following metadata is Hugging Face Spaces page chrome that was scraped
#  into this file with the source — author DeMaking, commit 4159e5e verified,
#  "Rename app.py to last_app.py", raw/history/blame, 4.52 kB — preserved here
#  as a comment so the module parses as valid Python.)
import os
import logging
import time
from fastapi import FastAPI, Request
# from transformers import pipeline
from huggingface_hub import InferenceClient, login
import langid
# import asyncio
# Configure logging
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)
# Hugging Face API token must come from the environment; fail fast at import
# time so the app never starts half-configured.
HF_HUB_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if not HF_HUB_TOKEN:
    raise ValueError("Missing Hugging Face API token. Please set HUGGINGFACEHUB_API_TOKEN.")

# Authenticate this process with the Hub and build the inference client used
# by generate_response() for chat completions.
login(token=HF_HUB_TOKEN)
client = InferenceClient(api_key=HF_HUB_TOKEN)

# FastAPI application instance; route handlers are registered below.
app = FastAPI()
# # Function to detect language
# def detect_language(user_input):
# try:
# lang, _ = langid.classify(user_input)
# return "hebrew" if lang == "he" else "english" if lang == "en" else "unsupported"
# except Exception as e:
# logging.error(f"Language detection error: {e}")
# return "unsupported"
def detect_language(user_input: str) -> str:
    """
    Classify the language of *user_input* using langid.

    Returns:
        "hebrew" for Hebrew text, "english" for English text, and
        "unsupported" for any other language or on classification failure.
    """
    # Map langid's ISO 639-1 codes to the labels the rest of the app expects;
    # anything not in the table is unsupported.
    labels = {"he": "hebrew", "en": "english"}
    try:
        code, _score = langid.classify(user_input)
    except Exception as e:
        logger.error(f"Language detection error: {e}")
        return "unsupported"
    return labels.get(code, "unsupported")
# # Function to generate response
# def generate_response(text):
# language = detect_language(text)
# if language == "hebrew":
# content = "转注谞讛 讘拽爪专讛 讗讘诇 转砖转祝 讗转 转讛诇讬讱 拽讘诇转 讛讛讞诇讟讜转 砖诇讱, " + text
# model = "microsoft/Phi-3.5-mini-instruct"
# elif language == "english":
# content = "keep it short but tell your decision making process, " + text
# model = "mistralai/Mistral-Nemo-Instruct-2407"
# else:
# return "Sorry, I only support Hebrew and English."
# messages = [{"role": "user", "content": content}]
# completion = client.chat.completions.create(
# model=model,
# messages=messages,
# max_tokens=2048,
# temperature=0.5,
# top_p=0.7
# )
# return completion.choices[0].message.content
def generate_response(text: str) -> str:
    """
    Build a language-appropriate prompt and query the Hub chat-completion API.

    Hebrew and English inputs are routed to different instruct models; any
    other detected language gets a fixed apology string.

    Returns:
        The model's reply text, or an error string if the API call fails.
    """
    # Per-language (prompt prefix, model id) routing table.
    # NOTE(review): the Hebrew prefix below looks mojibake'd (Hebrew UTF-8
    # decoded with the wrong codec). It is reproduced byte-for-byte here —
    # confirm against the original source before changing it.
    routing = {
        "hebrew": (
            "转注谞讛 讘拽爪专讛 讗讘诇 转砖转祝 讗转 转讛诇讬讱 拽讘诇转 讛讛讞诇讟讜转 砖诇讱, ",
            "microsoft/Phi-3.5-mini-instruct",
        ),
        "english": (
            "keep it short but tell your decision making process, ",
            "mistralai/Mistral-Nemo-Instruct-2407",
        ),
    }
    route = routing.get(detect_language(text))
    if route is None:
        return "Sorry, I only support Hebrew and English."
    prefix, model = route
    try:
        completion = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prefix + text}],
            max_tokens=2048,
            temperature=0.5,
            top_p=0.7,
        )
        return completion.choices[0].message.content
    except Exception as e:
        logger.error(f"Error generating response: {e}")
        return "Error: Could not generate response."
@app.post("/generate_response")
async def generate_text(request: Request):
    """
    Generate a chat-model response for the posted text.

    Expects a JSON body with a "text" field. Returns {"response": ...} on
    success, or {"error": ...} for missing input / unexpected failures.
    """
    try:
        data = await request.json()
        text = data.get("text", "").strip()
        if not text:
            return {"error": "No text provided"}
        response = generate_response(text)
        return {"response": response}
    except Exception as e:
        # Fix: use the module-level `logger` (as every other block in this
        # file does) rather than the root `logging` module; .exception()
        # additionally records the traceback for debugging.
        logger.exception(f"Error processing request: {e}")
        return {"error": "An unexpected error occurred."}
@app.get("/")
async def root():
    """Liveness check: confirms the API process is up and serving."""
    status = {"message": "Decision Helper API is running!"}
    return status
# Function to run bot.py
# def run_bot():
# logging.info("Starting Telegram bot...")
# subprocess.Popen(["python3", "bot.py"])
if __name__ == "__main__":
    # run_bot()
    # Local import keeps uvicorn off the import path when this module is
    # loaded by an external ASGI server instead of being run directly.
    import uvicorn

    # 0.0.0.0:7860 is the conventional bind for Hugging Face Spaces.
    uvicorn.run(app, host="0.0.0.0", port=7860)