File size: 3,800 Bytes
6fd136c b0d1bb1 6fd136c b0d1bb1 6fd136c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 |
# Import necessary libraries and modules
from fastapi import FastAPI, HTTPException, Request
from pydantic import BaseModel
from pathlib import Path
from dotenv import load_dotenv
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import Document
from data import load_db
from names import DATASET_ID, MODEL_ID
from storage import RedisStorage
from utils import weighted_random_sample
import os
import numpy as np
from typing import List, Dict
# Load environment variables from a local .env file (API keys, org IDs, Redis creds)
load_dotenv()
# Initialize FastAPI app
app = FastAPI(title="Play My Emotions API", description="API for emotion-based song recommendation", version="1.0")
# Define request and response models
class EmotionInput(BaseModel):
    # Free-text description of the user's current mood/feelings.
    emotion_text: str
class SongRecommendation(BaseModel):
    # Emotion summary string produced by the LLM chain from the user's input.
    emotions: str
    # Each entry carries {"name": ..., "embed_url": ...} taken from document metadata.
    recommended_songs: List[Dict[str, str]]
# Check if storage is enabled.
# Accepts "true"/"t"/"1" (case-insensitive); any other value disables it.
# Defaults to enabled when the env var is unset.
USE_STORAGE = os.environ.get("USE_STORAGE", "True").lower() in ("true", "t", "1")
# Initialize necessary components for the app
def init():
    """Build the shared application components.

    Returns:
        tuple: ``(db, storage, chain)`` —
            db: read-only Deep Lake vector store for similarity search,
            storage: RedisStorage instance, or None when USE_STORAGE is off,
            chain: LLMChain that maps free-text input to an emotion summary.

    Raises:
        KeyError: if a required environment variable is missing.
    """
    embeddings = OpenAIEmbeddings(model=MODEL_ID)
    dataset_path = f"hub://{os.environ['ACTIVELOOP_ORG_ID']}/{DATASET_ID}"
    # Read-only handle: the API only queries the dataset, never writes to it.
    db = load_db(
        dataset_path,
        embedding_function=embeddings,
        token=os.environ["ACTIVELOOP_TOKEN"],
        read_only=True,
    )
    # Honor the USE_STORAGE flag: the original always opened a Redis
    # connection even when storage was disabled via the environment.
    storage = None
    if USE_STORAGE:
        storage = RedisStorage(
            host=os.environ["UPSTASH_URL"], password=os.environ["UPSTASH_PASSWORD"]
        )
    prompt = PromptTemplate(
        input_variables=["user_input"],
        template=Path("prompts/bot.prompt").read_text(),
    )
    # Low temperature keeps emotion summaries relatively stable across calls.
    llm = ChatOpenAI(temperature=0.3)
    chain = LLMChain(llm=llm, prompt=prompt)
    return db, storage, chain
# Build shared singletons once at import time (module-level side effect:
# needs env vars and network access as soon as this module is imported).
db, storage, chain = init()
# Endpoint to Check Request Details
@app.get("/check_request")
async def check_request(request: Request):
    """Echo back request metadata — handy for debugging proxies and clients."""
    details = {}
    details["base_url"] = str(request.base_url)
    details["url"] = str(request.url)
    details["client"] = str(request.client)
    details["headers"] = dict(request.headers)
    return details
# Define API endpoint for song recommendation
@app.post("/recommend", response_model=SongRecommendation)
async def recommend_song(emotion: EmotionInput):
    """Recommend songs matching the emotions inferred from the user's text.

    Raises:
        HTTPException: 400 when the input text is empty.
    """
    user_input = emotion.emotion_text
    if not user_input:
        raise HTTPException(status_code=400, detail="Emotion input is required")
    # Assuming max_number_of_songs is 20 for now
    docs, emotions = get_song(user_input, k=20)
    # Expose only the name and embed_url fields from each document's metadata.
    recommended_songs = []
    for doc in docs:
        recommended_songs.append(
            {"name": doc.metadata["name"], "embed_url": doc.metadata["embed_url"]}
        )
    return {"emotions": emotions, "recommended_songs": recommended_songs}
# Helper function to get song based on user input
def get_song(user_input: str, k: int = 20, filter_threshold: float = 0.8, n_songs: int = 2):
    """Return up to ``n_songs`` documents sampled by similarity to the input.

    Args:
        user_input: free-text emotion description from the user.
        k: number of candidates to retrieve from the vector store.
        filter_threshold: minimum cosine similarity a candidate must exceed
            (previously hard-coded to 0.8).
        n_songs: how many songs to sample for display (previously hard-coded to 2).

    Returns:
        tuple: (list of sampled documents — possibly empty, emotion summary string).
    """
    emotions = chain.run(user_input=user_input)
    matches = db.similarity_search_with_score(emotions, distance_metric="cos", k=k)
    filtered = filter_scores(matches, filter_threshold)
    # Guard the empty case: zip(*[]) would raise "not enough values to unpack"
    # whenever no candidate clears the similarity threshold.
    if not filtered:
        return [], emotions
    docs, scores = zip(*normalize_scores_by_sum(filtered))
    # Weighted sampling so higher-scoring songs are more likely, but not certain.
    chosen_docs = weighted_random_sample(
        np.array(docs), np.array(scores), n=n_songs
    ).tolist()
    return chosen_docs, emotions
# Helper function to filter scores
def filter_scores(matches, th: float = 0.8):
    """Keep only the (doc, score) pairs whose score strictly exceeds ``th``."""
    kept = []
    for pair in matches:
        _, similarity = pair
        if similarity > th:
            kept.append(pair)
    return kept
# Helper function to normalize scores
def normalize_scores_by_sum(matches):
    """Rescale scores so they sum to 1 (a sampling distribution).

    Args:
        matches: list of (doc, score) pairs; scores are assumed positive
            (callers pre-filter with filter_scores, so each score > threshold).

    Returns:
        list of (doc, normalized_score) pairs. Empty input yields an empty
        list — the original raised ZeroDivisionError here.
    """
    if not matches:
        return []
    tot = sum(score for _, score in matches)
    return [(doc, score / tot) for doc, score in matches]
# Run the app using uvicorn when the script is executed directly.
# Binds to all interfaces (0.0.0.0) on port 8000 — intended for container use.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
|