feat: add Dockerfile
- Dockerfile +38 -0
- app.py +78 -0
- app/app.py +0 -69
- data_importer.py +7 -2
- interface.py +3 -0
- plan_mock.json +0 -129
- requirements.txt +38 -0
- utils/llm_caller.py +210 -163
- utils/youtube_extractor.py +2 -2
Dockerfile
ADDED
@@ -0,0 +1,38 @@
+FROM python:3.11-slim
+
+# Set working directory
+WORKDIR /app
+
+# Set environment variables
+ENV PYTHONDONTWRITEBYTECODE=1
+ENV PYTHONUNBUFFERED=1
+ENV PYTHONPATH=/app
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+    build-essential \
+    curl \
+    && rm -rf /var/lib/apt/lists/*
+
+# Copy requirements first for better caching
+COPY requirements.txt .
+
+# Install Python dependencies
+RUN pip install --no-cache-dir --upgrade pip && \
+    pip install --no-cache-dir -r requirements.txt
+
+# Copy application code
+COPY . .
+
+# Create necessary directories
+RUN mkdir -p /app/data /app/logs
+
+# Expose port
+EXPOSE 8000
+
+# Health check
+HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
+    CMD curl -f http://localhost:8000/v1 || exit 1
+
+# Run the application
+CMD ["uvicorn", "app.app:app", "--host", "0.0.0.0", "--port", "8000"]
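A quick way to exercise the endpoint that the HEALTHCHECK instruction polls is a small Python probe. This is a sketch, not part of the commit, and it assumes the image has been built and is running with the container port published on localhost:8000.

# Sketch: probe the same endpoint the Dockerfile HEALTHCHECK curls (assumed localhost:8000).
import httpx

resp = httpx.get("http://localhost:8000/v1", timeout=10)
resp.raise_for_status()
print(resp.json())  # expected: the health_status dict returned by GET /v1 in app.py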
app.py
ADDED
@@ -0,0 +1,78 @@
+from fastapi import FastAPI
+
+from interface import PlanRequest, PlanResponse, TripPlan, YoutubeLinkRequest, YoutubeLinkResponse, ChatRequest
+from data_importer import DataImporter
+from utils.llm_caller import LLMCaller
+import asyncio
+import time
+from datetime import datetime
+
+app = FastAPI()
+data_importer = DataImporter()
+agent = LLMCaller()
+
+
+@app.get("/v1")
+def greet_json():
+    start_time = time.time()
+    health_status = {
+        "status": "healthy",
+        "timestamp": datetime.utcnow().isoformat(),
+        "service": "SealionAI Travel Planning Service",
+        "version": "1.0.0",
+        "checks": {}
+    }
+    return health_status
+
+@app.post("/v1/generateTripPlan", response_model=PlanResponse)
+def generate_trip_plan(request: PlanRequest):
+    try:
+        trip_plan = asyncio.run(agent.query_with_rag(request))
+        return PlanResponse(tripOverview=trip_plan.tripOverview,
+                            query_params=request,
+                            retrieved_data=trip_plan.retrieved_data,
+                            trip_plan=trip_plan.trip_plan,
+                            meta={"status": "success", "timestamp": datetime.utcnow().isoformat()})
+    except Exception as e:
+        print(f"Error in generate_trip_plan: {e}")
+        # Return error response
+        return PlanResponse(
+            tripOverview=f"Error: {str(e)}",
+            query_params=request,
+            retrieved_data=[],
+            trip_plan=TripPlan(overview="Error occurred", total_estimated_cost=0.0, steps=[]),
+            meta={"status": "error", "error": str(e)}
+        )
+
+@app.post("/v1/addYoutubeLink", response_model=YoutubeLinkResponse)
+def add_youtube_link(request: YoutubeLinkRequest):
+    try:
+        data_importer.insert_from_youtube(request.video_id)
+    except Exception as e:
+        return YoutubeLinkResponse(
+            message="Failed to add YouTube link",
+            video_url=None
+        )
+    return YoutubeLinkResponse(
+        message="add successfully",
+        video_url=f"https://www.youtube.com/watch?v={request.video_id}"
+    )
+
+@app.post("/v1/searchSimilar", response_model=list[dict])
+def search_similar(request: YoutubeLinkRequest):
+    try:
+        results = data_importer.search_similar(query=request.video_id)
+        return results
+    except Exception as e:
+        print(f"Error during search: {e}")
+        return {"error": "Search failed"}
+    return []
+
+@app.post("/v1/basicChat", response_model=str)
+def basic_chat(request: ChatRequest):
+    user_message = request.message
+    llm_response = asyncio.run(agent.basic_query(
+        user_prompt=user_message
+    ))
+    return llm_response
+
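For manual testing of the new endpoints, a small client sketch could look like the following. The PlanRequest field names are mirrored from how query_with_rag reads them in utils/llm_caller.py; the exact schema (which fields are required or optional) lives in interface.py, so treat the payload below as an assumption, and the server address and values as placeholders.

# Sketch: call the new endpoints with httpx (service assumed at localhost:8000).
import httpx

plan_request = {  # field names taken from query_with_rag; values are placeholders
    "start_place": "Bangkok",
    "destination_place": "Chiang Mai",
    "trip_duration_days": 3,
    "trip_price": 10000,
    "trip_context": "cultural trip",
    "group_size": 2,
    "preferences": ["street food"],
    "top_k": 3,
}

with httpx.Client(base_url="http://localhost:8000", timeout=120) as client:
    plan = client.post("/v1/generateTripPlan", json=plan_request).json()
    print(plan["tripOverview"])

    chat = client.post("/v1/basicChat", json={"message": "What should I pack for Chiang Mai?"})
    print(chat.json())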
app/app.py
DELETED
@@ -1,69 +0,0 @@
-from fastapi import FastAPI
-
-from interface import PlanRequest, PlanResponse, PlanStep, TransportInfo, TripPlan, YoutubeLinkRequest, YoutubeLinkResponse
-from data_importer import DataImporter
-import os
-import json
-
-app = FastAPI()
-data_importer = DataImporter()
-
-def load_mock_data(path: str = "plan_mock.json") -> dict:
-    """Load mock data from plan_mock.json"""
-    try:
-        file_path = os.path.join(os.path.dirname(__file__), path)
-        with open(file_path, 'r', encoding='utf-8') as file:
-            return json.load(file)
-    except FileNotFoundError:
-        # Return default mock data if file not found
-        print("Mock data file not found. Using default mock data.")
-        return {"error": "Invalid JSON format"}
-
-
-@app.get("/v1")
-def greet_json():
-    return {"Hello": "World!"}
-
-@app.post("/v1/generateTripPlan", response_model=PlanResponse)
-def generate_trip_plan(request: PlanRequest):
-    mock_trip_plan = load_mock_data()
-    print(mock_trip_plan)
-    return PlanResponse(
-        tripOverview="Sample trip overview.",
-        query_params=request,
-        retrieved_data=[],
-        trip_plan=TripPlan(
-            overview="Sample trip overview",
-            total_estimated_cost=1000.0,
-            steps=[PlanStep(
-                day=1,
-                title="Arrival in New York",
-                description="Arrive at JFK Airport and check-in at the hotel.",
-                transport=TransportInfo(
-                    mode="Plane",
-                    departure="Your hometown airport",
-                    arrival="JFK Airport",
-                    duration_minutes=300,
-                    price=300.0,
-                    details="Non-stop flight"
-                ),
-                map_coordinates={"lat": 40.6413, "lon": -73.7781},
-                images=["https://example.com/images/jfk_airport.jpg"],
-                tips=["Bring a valid ID", "Confirm your hotel reservation"]
-            )]),
-        meta={"status": "success"}
-    )
-
-# @app.post("/v1/addYoutubeLink", response_model=YoutubeLinkResponse)
-# def add_youtube_link(request: YoutubeLinkRequest):
-#     try:
-#         data_importer.insert_from_youtube(request.video_id)
-#     except Exception as e:
-#         return YoutubeLinkResponse(
-#             message="Failed to add YouTube link",
-#             video_url=None
-#         )
-#     return YoutubeLinkResponse(
-#         message="add successfully",
-#         video_url=f"https://www.youtube.com/watch?v={request.video_id}"
-#     )
data_importer.py
CHANGED
@@ -6,7 +6,7 @@ from typing import List, Dict, Optional, Union
 import uuid
 
 class DataImporter:
-    def __init__(self, qdrant_url: str = "
+    def __init__(self, qdrant_url: str = "https://qdrant.taspolsd.dev", collection_name: str = "demo_bge_m3"):
         self.model = SentenceTransformer("BAAI/bge-m3")
         self.client = QdrantClient(url=qdrant_url)
         self.collection_name = collection_name
@@ -17,6 +17,11 @@ class DataImporter:
 
     def _create_collection(self):
         try:
+            collections = self.client.get_collection(self.collection_name)
+            if collections:
+                print(f"Collection '{self.collection_name}' already exists.")
+                return
+
             self.client.recreate_collection(
                 collection_name=self.collection_name,
                 vectors_config=VectorParams(size=1024, distance=Distance.COSINE)
@@ -67,7 +72,7 @@ class DataImporter:
     def insert_from_youtube(self, video_id: str, metadata: Optional[Dict] = None) -> Optional[str]:
         try:
             # Extract text from YouTube (assuming your YoutubeExtractor has this method)
-            text = self.youtube_extractor.
+            text = self.youtube_extractor.get_full_text(video_id)
             if text:
                 video_metadata = {"source": "youtube", "video_id": video_id}
                 if metadata:
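A minimal local usage sketch of the updated DataImporter, with a hypothetical video id and query text; it assumes the default qdrant_url and collection_name set in __init__ are reachable from where it runs.

# Sketch: exercise the updated DataImporter (hypothetical video id and query).
from data_importer import DataImporter

importer = DataImporter()  # uses the qdrant_url / collection_name defaults from __init__
importer.insert_from_youtube("dQw4w9WgXcQ")  # hypothetical video id
hits = importer.search_similar(query="night markets in Chiang Mai")
for hit in hits:
    print(hit)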
interface.py
CHANGED
@@ -65,3 +65,6 @@ class PlanResponse(BaseModel):
     retrieved_data: List[RetrievedItem]
     trip_plan: TripPlan
     meta: Dict[str, Any]
+
+class ChatRequest(BaseModel):
+    message: str
plan_mock.json
DELETED
@@ -1,129 +0,0 @@
-{
-    "tripOverview": {
-        "summary": "Adventure trip from Bangkok to Chiang Mai with cultural exploration",
-        "regions": ["Thailand", "Chiang Mai"],
-        "destination": "Chiang Mai, Thailand",
-        "EstimatedCost": "Approximately 10,000 THB per person",
-        "durationDays": 5,
-        "purpose": "Adventure, cultural experience"
-    },
-    "milestones": [
-        "Bangkok Suvarnabhumi Airport",
-        "Chiang Mai International Airport",
-        "Doi Suthep-Pui National Park",
-        "Chiang Mai Old City",
-        "Elephant Nature Park",
-        "Chiang Rai Night Bazaar"
-    ],
-    "transportation": [
-        {
-            "mode": "Plane",
-            "from": "Bangkok Suvarnabhumi Airport",
-            "to": "Chiang Mai International Airport",
-            "schedule": "2025-09-15T08:00:00Z",
-            "price": "1500 THB per person"
-        },
-        {
-            "mode": "Songthaew (Red Truck)",
-            "from": "Chiang Mai International Airport",
-            "to": "Chiang Mai Old City",
-            "schedule": "2025-09-15T11:00:00Z",
-            "price": "40 THB per person"
-        }
-    ],
-    "accommodation": [
-        {
-            "type": "Guesthouse",
-            "location": "Chiang Mai Old City",
-            "contact": "053-211-111",
-            "notes": "Centrally located with air conditioning"
-        }
-    ],
-    "tripRoute": [
-        {
-            "day": 1,
-            "activities": [
-                "Arrival at Chiang Mai International Airport",
-                "Check-in at guesthouse",
-                "Explore Chiang Mai Old City",
-                "Visit Wat Phra That Doi Suthep"
-            ],
-            "walkingRoute": "Old City Moat",
-            "signage": "Follow city maps",
-            "suggestions": ["Wear comfortable shoes", "Try local street food"],
-            "precautions": ["Beware of traffic"]
-        },
-        {
-            "day": 2,
-            "activities": [
-                "Visit Elephant Nature Park",
-                "Participate in elephant conservation activities"
-            ],
-            "transport": {
-                "mode": "Van",
-                "from": "Chiang Mai Old City",
-                "to": "Elephant Nature Park",
-                "price": "800 THB per person",
-                "duration_minutes": 60
-            },
-            "suggestions": ["Wear long sleeves", "Bring sunscreen"],
-            "precautions": ["Follow park guidelines"]
-        },
-        {
-            "day": 3,
-            "activities": [
-                "Trekking in Doi Suthep-Pui National Park",
-                "Visit waterfalls and scenic viewpoints"
-            ],
-            "transport": {
-                "mode": "Songthaew (Red Truck)",
-                "from": "Chiang Mai Old City",
-                "to": "Doi Suthep-Pui National Park",
-                "price": "50 THB per person",
-                "duration_minutes": 45
-            },
-            "suggestions": ["Bring water and snacks", "Wear hiking boots"],
-            "precautions": ["Beware of slippery trails"]
-        },
-        {
-            "day": 4,
-            "activities": [
-                "Visit Chiang Rai Night Bazaar",
-                "Explore local markets and try street food"
-            ],
-            "transport": {
-                "mode": "Bus",
-                "from": "Chiang Mai Arcade Bus Station",
-                "to": "Chiang Rai Night Bazaar",
-                "price": "200 THB per person",
-                "duration_minutes": 180
-            },
-            "suggestions": ["Bring cash", "Try local delicacies"],
-            "precautions": ["Beware of pickpockets"]
-        },
-        {
-            "day": 5,
-            "activities": [
-                "Return to Bangkok",
-                "Departure from Chiang Mai International Airport"
-            ],
-            "transport": {
-                "mode": "Plane",
-                "from": "Chiang Mai International Airport",
-                "to": "Bangkok Suvarnabhumi Airport",
-                "price": "1500 THB per person",
-                "duration_minutes": 75
-            }
-        }
-    ],
-    "emergencyContacts": {
-        "localRangers": "053-211-111",
-        "hospital": "Chiang Mai Ram Hospital: 053-211-111",
-        "embassy": "Thai Embassy: 02-281-0141"
-    },
-    "tips": [
-        "Always carry a map or use a GPS app",
-        "Respect local customs and traditions",
-        "Stay hydrated and wear sunscreen"
-    ]
-}
requirements.txt
ADDED
@@ -0,0 +1,38 @@
+# Web Framework
+fastapi==0.104.1
+uvicorn[standard]==0.24.0
+
+# Environment and Configuration
+python-dotenv==1.0.0
+
+# AI/ML Libraries
+openai==1.3.7
+sentence-transformers==2.2.2
+torch==2.1.1
+transformers==4.35.2
+
+# Vector Database
+qdrant-client==1.6.9
+
+# YouTube Processing
+youtube-transcript-api==0.6.1
+
+# HTTP Client
+httpx==0.25.2
+
+# Data Processing
+pydantic==2.5.0
+typing-extensions==4.8.0
+
+# Standard Libraries (usually included but explicit)
+asyncio
+json
+os
+time
+datetime
+uuid
+re
+
+# Optional: For better performance
+uvloop==0.19.0  # Unix only
+python-multipart==0.0.6
utils/llm_caller.py
CHANGED
@@ -1,196 +1,243 @@
 import os
 import asyncio
 import httpx
+from dotenv import load_dotenv
 from typing import List, Optional, Dict, Any
 from dataclasses import dataclass
 from qdrant_client import QdrantClient
+from openai import OpenAI
+from sentence_transformers import SentenceTransformer
+from interface import PlanResponse, TripPlan, PlanStep, TransportInfo, RetrievedItem, PlanRequest
+import json
 
+load_dotenv()
 SYSTEM_PROMPT = """You are a helpful travel assistant. Use the provided context to answer the user's question about travel destinations and places.
 If the context doesn't contain relevant information, say so politely and provide general advice if possible."""
 '''
 '''
-@dataclass
-class RetrievedItem:
-    place_id: str
-    place_name: str
-    description: Optional[str]
-    score: float
-    metadata: Dict[str, Any]
+# @dataclass
+# class RetrievedItem:
+#     place_id: str
+#     place_name: str
+#     description: Optional[str]
+#     score: float
+#     metadata: Dict[str, Any]
 
 class LLMCaller:
     def __init__(self):
         # Environment variables
-        self.
-        self.top_k = 
-        # LLM configuration
-        self.llm_api_url = os.getenv("LLM_API_URL", "https://api.openai.com/v1/chat/completions")
-        self.llm_api_key = os.getenv("LLM_API_KEY", "sk-REPLACE_ME")
-        # Initialize Qdrant client
+        self.client = OpenAI(
+            api_key=os.getenv("SEALION_API"),
+            base_url=os.getenv("SEALION_BASE_URL"),
+        )
+        self.top_k = 3
+        self.qdrant_host = os.getenv("QDRANT_HOST")
         self.qdrant = QdrantClient(
-            api_key=self.qdrant_api_key
+            url=self.qdrant_host,
         )
+        self.system_prompt = SYSTEM_PROMPT
+        self.embedding_model = SentenceTransformer("BAAI/bge-m3")
+        self.collection_name = "demo_bge_m3"
 
-    async def 
-        """
-        Call LLM with system and user prompts
-
-        Args:
-            system_prompt (str): System message for the LLM
-            user_prompt (str): User message/question
-            max_tokens (int): Maximum tokens to generate
-            model (str): Model to use
-
-        Returns:
-            str: LLM response text
-        """
-        headers = {
-            "Authorization": f"Bearer {self.llm_api_key}",
-            "Content-Type": "application/json",
-        }
-        payload = {
-            "model": model,
-            "messages": [
-                {"role": "system", "content": system_prompt},
-                {"role": "user", "content": user_prompt}
-            ],
-            "max_tokens": max_tokens,
-            "temperature": 0.7,
-        }
+    async def basic_query(self, user_prompt: str, max_tokens: int = 1024, model: str = "aisingapore/Gemma-SEA-LION-v3-9B-IT") -> str:
 
         try:
+            completion = self.client.chat.completions.create(
+                model=model,
+                messages=[
+                    {
+                        "role": "system",
+                        "content": self.system_prompt
+                    },
+                    {
+                        "role": "user",
+                        "content": user_prompt
+                    }
+                ]
+            )
+            return completion.choices[0].message.content
 
         except Exception as e:
            print(f"Error calling LLM: {e}")
            return f"Error: Unable to get LLM response - {str(e)}"
 
-    async def 
+    async def query_with_rag(self, plan_request: PlanRequest, collection_name: Optional[str] = None) -> 'PlanResponse':
         """
-        Args:
-            query_embedding (List[float]): Query vector embedding
-            top_k (int, optional): Number of results to return
-            collection_name (str, optional): Collection name to query
-
-        Returns:
-            List[RetrievedItem]: Retrieved items from Qdrant
+        Perform RAG query using PlanRequest, embed query, search Qdrant, and generate complete PlanResponse via LLM
         """
+        print(plan_request)
+        try:
+            # 1. Create query string from PlanRequest
+            query_text = f"Trip from {plan_request.start_place} to {plan_request.destination_place}"
+            if plan_request.trip_context:
+                query_text += f" for {plan_request.trip_context}"
+            if plan_request.trip_duration_days:
+                query_text += f" for {plan_request.trip_duration_days} days"
+            if plan_request.trip_price:
+                query_text += f" with budget {plan_request.trip_price}"
+
+            # 2. Generate embedding for the query
+            query_embedding = self.embedding_model.encode(query_text, normalize_embeddings=True).tolist()
+
+            # 3. Search Qdrant for similar content
+            collection = collection_name or self.collection_name
+            top_k = plan_request.top_k or self.top_k
+
+            search_results = self.qdrant.search(
+                collection_name=collection,
+                query_vector=query_embedding,
+                limit=top_k,
+                with_payload=True
+            )
+
+            # 4. Convert search results to RetrievedItem format
+            retrieved_data = []
+            context_text = ""
+
+            for result in search_results:
+                retrieved_item = RetrievedItem(
+                    place_id=str(result.id),
+                    place_name=result.payload.get("place_name", "Unknown"),
+                    description=result.payload.get("text", ""),
+                    score=result.score,
+                    metadata=result.payload
                 )
-                    place_name=payload.get("name") or payload.get("title") or "",
-                    description=payload.get("description") or payload.get("summary") or None,
-                    score=float(h.score) if h.score is not None else 0.0,
-                    metadata=payload,
-                ))
-            return items
-        except Exception as e:
-            print(f"Error querying Qdrant: {e}")
-            return []
+                retrieved_data.append(retrieved_item)
+                context_text += f"\n{result.payload.get('text', '')}"
+
+            # 5. Create detailed prompt for LLM to generate structured response
+            llm_prompt = f"""
+You are a travel planning assistant. Based on the trip request and travel context provided, generate a comprehensive trip plan in the exact JSON format specified below.
 
-            for item in retrieved_items:
-                context_parts.append(f"- {item.place_name}: {item.description or 'No description available'}")
-
-            context = "\n".join(context_parts) if context_parts else "No relevant information found."
-
-            # Default system prompt if none provided
-            if not system_prompt:
-                system_prompt = """You are a helpful travel assistant. Use the provided context to answer the user's question about travel destinations and places.
-                If the context doesn't contain relevant information, say so politely and provide general advice if possible."""
-
-            # Create user prompt with context
-            user_prompt = f"""Context:
-{context}
+Trip Request:
+- From: {plan_request.start_place}
+- To: {plan_request.destination_place}
+- Duration: {plan_request.trip_duration_days} days
+- Budget: {plan_request.trip_price}
+- Context: {plan_request.trip_context}
+- Group Size: {plan_request.group_size}
+- Preferences: {plan_request.preferences}
 
+Relevant Travel Context:
+{context_text}
 
+Generate a response in this EXACT JSON format (no additional text before or after):
+{{
+    "tripOverview": "A comprehensive 2-3 paragraph overview of the entire trip",
+    "trip_plan": {{
+        "overview": "Brief summary of the trip plan",
+        "total_estimated_cost": estimated_total_cost_as_number,
+        "steps": [
+            {{
+                "day": 1,
+                "title": "Day 1 title",
+                "description": "Detailed description of day 1 activities",
+                "transport": {{
+                    "mode": "transportation method",
+                    "departure": "departure location",
+                    "arrival": "arrival location",
+                    "duration_minutes": estimated_duration_in_minutes,
+                    "price": estimated_price,
+                    "details": "additional transport details"
+                }},
+                "map_coordinates": {{"lat": latitude_number, "lon": longitude_number}},
+                "images": ["url1", "url2"],
+                "tips": ["tip1", "tip2", "tip3"]
+            }}
+        ]
+    }}
+}}
+Don't Explain or add any additional text outside the JSON format.
+Ensure the JSON is valid and well-structured.
 
+Create {plan_request.trip_duration_days or 1} days of detailed activities. Include realistic prices, coordinates, and practical tips. Make it specific to the destinations and context provided.
+"""
+
+            # 6. Call LLM to generate structured trip plan
+            llm_response = await self.basic_query(user_prompt=llm_prompt, max_tokens=12048)
+
+            # 7. Parse LLM response as JSON
+            try:
+                # Clean the response and parse JSON
+                json_str = llm_response.strip()
+                if json_str.startswith("```json"):
+                    json_str = json_str[7:]
+                if json_str.endswith("```"):
+                    json_str = json_str[:-3]
+
+                llm_data = json.loads(json_str)
+
+                # Convert to PlanResponse structure
+                trip_plan_data = llm_data.get("trip_plan", {})
+                steps_data = trip_plan_data.get("steps", [])
+
+                # Convert steps to PlanStep objects
+                plan_steps = []
+                for step in steps_data:
+                    transport_data = step.get("transport", {})
+                    transport = TransportInfo(
+                        mode=transport_data.get("mode"),
+                        departure=transport_data.get("departure"),
+                        arrival=transport_data.get("arrival"),
+                        duration_minutes=transport_data.get("duration_minutes"),
+                        price=transport_data.get("price"),
+                        details=transport_data.get("details")
+                    )
+
+                    plan_step = PlanStep(
+                        day=step.get("day"),
+                        title=step.get("title"),
+                        description=step.get("description"),
+                        transport=transport,
+                        map_coordinates=step.get("map_coordinates", {}),
+                        images=step.get("images", []),
+                        tips=step.get("tips", [])
+                    )
+                    plan_steps.append(plan_step)
+
+                trip_plan = TripPlan(
+                    overview=trip_plan_data.get("overview", ""),
+                    total_estimated_cost=trip_plan_data.get("total_estimated_cost"),
+                    steps=plan_steps
+                )
+
+                return PlanResponse(
+                    tripOverview=llm_data.get("tripOverview", ""),
+                    query_params=plan_request,
+                    retrieved_data=retrieved_data,
+                    trip_plan=trip_plan,
+                    meta={
+                        "status": "success",
+                        "query_text": query_text,
+                        "results_count": len(retrieved_data)
+                    }
+                )
+
+            except json.JSONDecodeError as e:
+                print(f"Error parsing LLM JSON response: {e}")
+                print(f"LLM Response: {llm_response}")
+
+                # Fallback: create basic response with LLM text
+                return PlanResponse(
+                    tripOverview=llm_response,
+                    query_params=plan_request,
+                    retrieved_data=retrieved_data,
+                    trip_plan=TripPlan(
+                        overview="Generated plan (parsing error)",
+                        total_estimated_cost=plan_request.trip_price,
+                        steps=[]
+                    ),
+                    meta={"status": "json_parse_error", "error": str(e)}
+                )
+
+        except Exception as e:
+            print(f"Error in RAG query: {e}")
+            return PlanResponse(
+                tripOverview=f"Error generating trip plan: {str(e)}",
+                query_params=plan_request,
+                retrieved_data=[],
+                trip_plan=TripPlan(overview="Error occurred", total_estimated_cost=0.0, steps=[]),
+                meta={"status": "error", "error": str(e)}
+            )
 
-    response = await llm_caller.call_llm(
-        system_prompt="You are a helpful assistant.",
-        user_prompt="What is the capital of Thailand?"
-    )
-    print("Direct LLM Response:", response)
-
-    asyncio.run(main())
+
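Two things worth sketching from this file: the environment variables __init__ now reads (SEALION_API, SEALION_BASE_URL, QDRANT_HOST, loaded via load_dotenv), and the fence-stripping step query_with_rag applies before json.loads. The snippet below restates both for local experiments; the values are placeholders, not the project's real endpoints.

# Sketch: placeholder environment setup plus the same fence-stripping parse
# logic used in query_with_rag, pulled out as a standalone helper.
import json
import os

os.environ.setdefault("SEALION_API", "your-api-key")                     # placeholder
os.environ.setdefault("SEALION_BASE_URL", "https://api.example.com/v1")  # placeholder
os.environ.setdefault("QDRANT_HOST", "http://localhost:6333")            # placeholder

def parse_llm_json(llm_response: str) -> dict:
    # Mirror of the clean-up in query_with_rag: drop ```json fences, then parse.
    json_str = llm_response.strip()
    if json_str.startswith("```json"):
        json_str = json_str[7:]
    if json_str.endswith("```"):
        json_str = json_str[:-3]
    return json.loads(json_str)

print(parse_llm_json('```json\n{"tripOverview": "demo"}\n```'))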
utils/youtube_extractor.py
CHANGED
@@ -7,7 +7,7 @@ class YoutubeExtractor:
 
     def extract_transcript(self, video_id: str) -> Optional[List[Dict]]:
         try:
-            transcript = self.ytt_api.fetch(video_id)
+            transcript = self.ytt_api.fetch(video_id, languages=['en', 'th'])
             return transcript
         except Exception as e:
             print(f"An error occurred: {e}")
@@ -15,7 +15,7 @@ class YoutubeExtractor:
     def get_text_only(self, video_id: str) -> Optional[List[str]]:
         transcript = self.extract_transcript(video_id)
         if transcript:
-            return [entry
+            return [entry.text for entry in transcript]
         return None
 
     def get_full_text(self, video_id: str) -> Optional[str]:
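A standalone sketch of the transcript call this change makes, using the same fetch(video_id, languages=['en', 'th']) interface and the entry.text access that get_text_only now relies on. Whether the youtube-transcript-api version pinned in requirements.txt exposes this exact interface is an assumption here, and the video id is hypothetical.

# Sketch: fetch a transcript with an English/Thai language preference,
# mirroring the extract_transcript change (hypothetical video id).
from youtube_transcript_api import YouTubeTranscriptApi

ytt_api = YouTubeTranscriptApi()
transcript = ytt_api.fetch("dQw4w9WgXcQ", languages=['en', 'th'])
full_text = " ".join(entry.text for entry in transcript)
print(full_text[:200])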