# Run the file using the below command
# streamlit run AI_Travel_Agent_streamlit.py

# Importing necessary modules
import os
import re
import time
import streamlit as st
import requests
from amadeus import Client
from dotenv import load_dotenv
from huggingface_hub import hf_hub_download
from langchain_community.llms import LlamaCpp
from langchain_community.chat_models import ChatOpenAI
from langchain_core.callbacks import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_community.utilities import GoogleSerperAPIWrapper
from langchain.tools import Tool
from langchain_community.agent_toolkits.amadeus.toolkit import AmadeusToolkit
from langchain_community.tools.amadeus.closest_airport import AmadeusClosestAirport
from langchain_community.tools.amadeus.flight_search import AmadeusFlightSearch
from langchain.agents import AgentExecutor, StructuredChatAgent
from langchain_community.agent_toolkits.load_tools import load_tools
from langchain.agents import create_react_agent
from langchain import hub
import json

# Load secret API keys (Amadeus, Serper/SerpAPI, OpenWeatherMap) from a .env file.
load_dotenv()

# Dummy OpenAI key: the Amadeus toolkit validates that this env var exists,
# but the app talks to a local Lemonade Server, so no real key is needed.
os.environ["OPENAI_API_KEY"] = "dummy-key"

# OpenWeatherMap API key; may be None if not configured (handled in get_weather_info).
OPENWEATHER_API_KEY = os.getenv("OPENWEATHER_API_KEY")

@st.cache_resource(show_spinner=False)
def create_llm(
    model_name: str = "Llama-3.2-3B-Instruct-Hybrid",
    temperature: float = 0.0
) -> ChatOpenAI:
    """
    Build a ChatOpenAI client that talks to a local Lemonade Server
    over its OpenAI-compatible HTTP API.

    Cached with st.cache_resource so the client is constructed once per
    (model_name, temperature) pair across Streamlit reruns.

    Args:
        model_name (str): Identifier of the model hosted on Lemonade Server.
        temperature (float): Sampling temperature (0.0 = deterministic).

    Returns:
        ChatOpenAI: Chat-completions client, or None if construction failed
        (the error is surfaced in the Streamlit UI).
    """
    # The local Lemonade Server exposes the OpenAI wire protocol here.
    lemonade_endpoint = "http://localhost:8000/api/v0"
    try:
        return ChatOpenAI(
            model_name=model_name,
            temperature=temperature,
            openai_api_base=lemonade_endpoint,  # route requests to the local server
            openai_api_key="none",              # the local server ignores the key
            verbose=False,                      # keep client-side logging quiet
        )
    except Exception as e:
        # Report the failure in the UI instead of crashing the app.
        st.error(f"Failed to create LLM client: {e}")
        return None

# Tools
def get_google_search_tools():
    """
    Build the web-search tools used by the agent.

    Wraps GoogleSerperAPIWrapper in a LangChain Tool and appends the
    SerpAPI tool loaded via load_tools.

    Returns:
        list: The search tools, or an empty list if initialization fails.
              Returning a list in all cases keeps the tool-list
              concatenation at the call sites safe.
    """
    try:
        # Serper-backed Google search, exposed as a plain LangChain Tool.
        search = GoogleSerperAPIWrapper()
        google_search_tool = Tool(
            name="Google Search tool",
            func=search.run,
            description="useful for when you need to ask with search",
        )

        return [google_search_tool] + load_tools(["serpapi"])
    except Exception as e:
        st.error(f"Error loading the google search tool: {e}")
        # Bug fix: previously fell through and returned None, which made
        # `get_google_search_tools() + other_tools` raise a TypeError.
        return []

def get_weather_info(location):
    """
    Get current weather information for a location via the OpenWeatherMap
    current-weather endpoint.

    Args:
        location (str): City name or "city,country" format.

    Returns:
        str: Human-readable weather summary, or an error message string.
             This function never raises; all failures are reported as text
             so the agent can relay them to the user.
    """
    # Fail fast with a helpful message when the key is not configured.
    if not OPENWEATHER_API_KEY:
        return "Error: OpenWeatherMap API key not found. Please set OPENWEATHER_API_KEY in your .env file."

    try:
        base_url = "http://api.openweathermap.org/data/2.5/weather"

        params = {
            "q": location,
            "appid": OPENWEATHER_API_KEY,
            "units": "metric"  # Celsius
        }

        # Bug fix: a timeout stops the Streamlit app from hanging forever
        # if the weather service is slow or unreachable.
        response = requests.get(base_url, params=params, timeout=10)
        response.raise_for_status()

        weather_data = response.json()

        # Fields presented to the user; missing keys fall through to the
        # KeyError handler below.
        city = weather_data["name"]
        country = weather_data["sys"]["country"]
        temperature = weather_data["main"]["temp"]
        feels_like = weather_data["main"]["feels_like"]
        humidity = weather_data["main"]["humidity"]
        weather_description = weather_data["weather"][0]["description"]
        weather_main = weather_data["weather"][0]["main"]
        wind_speed = weather_data["wind"]["speed"]

        weather_info = f"""
Current Weather in {city}, {country}:
- Temperature: {temperature}°C (feels like {feels_like}°C)
- Conditions: {weather_main} - {weather_description}
- Humidity: {humidity}%
- Wind Speed: {wind_speed} m/s
        """

        return weather_info.strip()

    except requests.exceptions.RequestException as e:
        return f"Error fetching weather data: {str(e)}"
    except KeyError as e:
        return f"Error parsing weather data: {str(e)}"
    except Exception as e:
        return f"Unexpected error: {str(e)}"

def get_weather_tools():
    """
    Expose get_weather_info to the agent as a LangChain Tool.

    Returns:
        list: A one-element list containing the weather tool, or an empty
              list if the tool could not be constructed.
    """
    try:
        return [
            Tool(
                name="Weather Information",
                func=get_weather_info,
                description="Get current weather information for a specific location. Use this tool when users ask about weather conditions in a city. Input should be a city name or 'city,country' format.",
            )
        ]
    except Exception as e:
        st.error(f"Error loading the weather tool: {e}")
        return []
# Prompt Template

def create_prompt_template():
    """
    Build the prompt pieces for the StructuredChatAgent, customised for
    travel-related queries.

    Returns:
        tuple: (PREFIX, FORMAT_INSTRUCTIONS, SUFFIX, HUMAN_MESSAGE_TEMPLATE)
        — the four strings StructuredChatAgent.from_llm_and_tools expects.
    """
    # Prepended before the tool list; [INST] tags target Llama/Mistral-style chat formats.
    prefix_text = """[INST]Respond to the human as helpfully and accurately as possible. You have access to the following tools:"""

    # Tool-routing rules plus the JSON-blob action format the agent must emit.
    format_text = """Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).

    Use the closest_airport tool and single_flight_search tool for any flight related queries.
    Give all the flight details including Flight Number, Carrier, Departure time, Arrival time and Terminal details to the human.
    Use the Weather Information tool for weather-related queries about specific locations.
    Provide current weather conditions, temperature, humidity, and weather description to the human.
    Use the Google Search tool and knowledge base for any itinerary-related queries. Give all the detailed information on tourist attractions, must-visit places, and hotels with ratings to the human.
    Use the Google Search tool for distance calculations. Give all the web results to the human.
    Always consider the traveler's preferences, budget constraints, and any specific requirements mentioned in their query.
    Valid "action" values: "Final Answer" or {tool_names}
    Provide only ONE action per $JSON_BLOB, as shown:
    ```
    {{{{
      "action": $TOOL_NAME,
      "action_input": $INPUT
    }}}}
    ```

    Follow this format:

    Question: input question to answer
    Thought: consider previous and subsequent steps
    Action:
    ```
    $JSON_BLOB
    ```
    Observation: action result
    ... (repeat Thought/Action/Observation N times)
    Thought: I know what to respond
    Action:
    ```
    {{{{
      "action": "Final Answer",
      "action_input": "Provide the detailed Final Answer to the human"
    }}}}
    ```[/INST]"""

    # Appended after the scratchpad; reiterates the single-action JSON contract.
    suffix_text = """Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate.
    Format is Action:```$JSON_BLOB```then Observation:.
    Thought:[INST]"""

    # Layout of each human turn: the question followed by the agent scratchpad.
    human_template = "{input}\n\n{agent_scratchpad}"

    return prefix_text, format_text, suffix_text, human_template


# Agent
def create_react_agent_func(llm, tools):
    """
    Build a ReAct agent from the stock LangChain hub prompt.

    Args:
        llm: Language model driving the agent.
        tools: Tools the agent may call.

    Returns:
        The ReAct agent, or None if creation failed (error shown in the UI).
    """
    try:
        # Standard community ReAct prompt from the LangChain hub.
        react_prompt = hub.pull("hwchase17/react")
        return create_react_agent(llm, tools, react_prompt)
    except Exception as e:
        st.error(f"Error creating ReAct agent: {e}")
        return None

def create_agent(llm, tools, PREFIX, SUFFIX, HUMAN_MESSAGE_TEMPLATE, FORMAT_INSTRUCTIONS):
    """
    Build a StructuredChatAgent from an LLM, a tool list and prompt pieces.

    Args:
        llm: Language model the agent reasons with.
        tools (list): Tools available to the agent.
        PREFIX (str): Text prepended to the agent's prompt.
        SUFFIX (str): Text appended to the agent's prompt.
        HUMAN_MESSAGE_TEMPLATE (str): Layout of each human message.
        FORMAT_INSTRUCTIONS (str): Instructions for the agent's output format.

    Returns:
        StructuredChatAgent on success; None if creation fails (the error
        is shown as a Streamlit message).
    """
    try:
        return StructuredChatAgent.from_llm_and_tools(
            llm,
            tools,
            prefix=PREFIX,
            suffix=SUFFIX,
            human_message_template=HUMAN_MESSAGE_TEMPLATE,
            format_instructions=FORMAT_INSTRUCTIONS,
        )
    except Exception as e:
        st.error(f"Error creating the agent: {e}")
        return None


def run_agent(agent, tools):
    """
    Wrap an agent and its tools in an AgentExecutor.

    Args:
        agent: The agent to execute; may be None if creation failed upstream.
        tools (list): Tools the executor may dispatch to.

    Returns:
        AgentExecutor, or None when the agent is missing or setup fails.
    """
    if agent is None:
        return None
    try:
        return AgentExecutor(
            agent=agent,
            tools=tools,
            verbose=True,                    # print the reasoning trace
            handle_parsing_errors=True,      # recover from malformed LLM output
            max_iterations=5,                # cap the tool-call loop
            return_intermediate_steps=True,  # expose steps for debugging
        )
    except Exception as e:
        st.error(f"An error occurred: {e}")
        return None


# Streamlit code starts here
# Main title and description
def streamlit_UI() -> None:
    """
    Render the Streamlit UI and drive one question/answer round-trip.

    Lays out the title, a sidebar (model picker plus sample questions) and
    a main area with a question box and submit button. On submit it builds
    the selected LLM, assembles the tool list, runs simple_agent_execute,
    and displays the tool usage and final answer.

    Raises:
        Exception: Any error while generating or rendering the response is
        caught and shown as a Streamlit error message.
    """
    st.title(":airplane: AI Travel Agent on AMD Ryzen AI")
    st.write("This Langchain-powered AI Travel Agent is designed to assist you with quick travel-related queries by Local LLM running on AMD Ryzen AI PC with Lemonade Server :lemon:. \
    You can request **flight details** for a specific day, find **nearby airports** by location, get **current weather information** for any city, or ask travel-related questions. For other queries, we use **Google Search** for the latest information.")

    # Sidebar: model selection and canned example questions.
    with st.sidebar:
        st.title(":gear: Model Selection")
        # Model IDs served by the local Lemonade Server.
        model_options = [
            "Llama-3.2-1B-Instruct-Hybrid",
            "Llama-3.2-3B-Instruct-Hybrid",
            "Phi-3-Mini-Instruct-Hybrid",
            "Phi-3.5-Mini-Instruct-Hybrid",
            "Qwen-1.5-7B-Chat-Hybrid",
            "DeepSeek-R1-Distill-Llama-8B-Hybrid",
            "DeepSeek-R1-Distill-Qwen-7B-Hybrid",
            "Mistral-7B-v0.3-Instruct-Hybrid",
            "Llama-3.1-8B-Instruct-Hybrid",
            "Qwen3-8B-GGUF",
            "DeepSeek-Qwen3-8B-GGUF"
        ]
        
        selected_model = st.selectbox(
            "Choose a Model",
            model_options,
            index=0,
            help="Select the LLM model to use for generating responses"
        )

        st.title(":bulb: Example Queries")
        st.write("Here are some questions you can ask:")
        predefined_questions = [
            "What are the best hidden gems to visit in Tokyo?",
            "Find me the cheapest flights from San Francisco to Paris next week",
            "What's the weather like in Taipei today?",
            "What are the top-rated hotels near Times Square in New York?",
            "What's the best time to visit Bali and what should I pack?",
            "What are the must-try local foods in Beijing?",
            "What's the weather forecast for London right now?"
        ]

        # The chosen example pre-fills the main question box below.
        selected_sidebar_question = st.radio(
            "Choose a Question", predefined_questions)

    # Main area: notes, question input and submit button.
    with st.container():
        # For important notes
        st.markdown("#### **:warning: Important Notes**")
        st.write("""
        - Include your **starting location**, **destination**, and **travel date** when requesting flight details.
        - Always **verify important information**, as the agent may make mistakes.
        """)

        # Additional instruction in a block quote
        st.markdown(
            "> **:notebook: Quick Tip:** Check the **side-bar** for more examples to guide you!")

        # Two columns: wide text area next to a narrow submit button.
        col1, col2 = st.columns([6, 1])

        with col1:
            question = st.text_area(
                "Your Question",
                value=selected_sidebar_question, 
                key="question_input",
                label_visibility="collapsed")
            
        with col2:
            # Blank writes push the button down to align with the text area.
            st.write("")
            st.write("")
            st.write("")
            submit = st.button(":mag: Submit")

    if submit:
        # Basic sanity check: not all digits, and contains at least one letter.
        if not question.isdigit() and re.search(r'[A-Za-z]', question):
            try:
                with st.spinner("Generating answer..."):
                    # Build (or fetch from cache) the LLM for the selected model.
                    llm = create_llm(model_name=selected_model)
                    
                    # Rebuild the Amadeus toolkit with this LLM.
                    # NOTE(review): `amadeus` is the module-level Client created
                    # during import; if that init failed, this raises — TODO confirm.
                    amadeus_toolkit = AmadeusToolkit(client=amadeus, llm=llm)
                    tools = get_google_search_tools() + get_weather_tools() + amadeus_toolkit.get_tools()
                    
                    # Display available tools
                    st.write("**Available Tools:**")
                    for tool in tools:
                        st.write(f"- {tool.name}: {tool.description}")

                    with st.expander("Agent Execution", expanded=True):
                        # Single-shot custom agent (not the ReAct/StructuredChat paths).
                        result = simple_agent_execute(llm, tools, question)
                        
                        # Show which tool ran, if any; 'none'/'error' are sentinels.
                        if result['tool_used'] != 'none' and result['tool_used'] != 'error':
                            st.write("**🔧 Tool Usage:**")
                            st.write(f"- Tool: {result['tool_used']}")
                            st.write(f"- Input: {result['tool_input']}")
                            st.write(f"- Result: {result['tool_result']}")
                            st.write("---")
                        
                        # Display final answer
                        st.write("**🎯 Final Answer:**")
                        st.write(result['final_answer'])
            except Exception as e:
                st.error(f"An error occurred: {e}")
        else:
            st.error("Invalid question input. Please enter a valid question.")


# Module-level initialization: Amadeus client + toolkit, built at import time.
try:
    amadeus_client_secret = os.getenv("AMADEUS_CLIENT_SECRET")
    amadeus_client_id = os.getenv("AMADEUS_CLIENT_ID")
    amadeus = Client(client_id=amadeus_client_id,
                     client_secret=amadeus_client_secret)
    
    # Placeholder LLM only to satisfy AmadeusToolkit's constructor; the real
    # LLM is created per-request in streamlit_UI. The dummy key is never used
    # for actual API calls.
    temp_llm = ChatOpenAI(
        model_name="gpt-3.5-turbo",
        temperature=0,
        openai_api_key="dummy-key"
    )
    
    amadeus_toolkit = AmadeusToolkit(client=amadeus, llm=temp_llm)
    # Resolve pydantic forward references so the toolkit models validate.
    AmadeusToolkit.model_rebuild()
    AmadeusClosestAirport.model_rebuild()
    AmadeusFlightSearch.model_rebuild()

    # NOTE(review): this module-level `tools` list appears unused; streamlit_UI
    # rebuilds its own list on submit — TODO confirm before removing.
    tools = get_google_search_tools() + get_weather_tools() + amadeus_toolkit.get_tools()

except Exception as e:
    st.error(f"Error loading the amadeus toolkit : {e}")

# Prompt pieces for the (currently unused) StructuredChatAgent path.
PREFIX, FORMAT_INSTRUCTIONS, SUFFIX, HUMAN_MESSAGE_TEMPLATE = create_prompt_template()

# NOTE(review): vestigial block — the agent is no longer created here; the app
# uses simple_agent_execute inside streamlit_UI instead. The try/except can
# never fail on a plain assignment.
try:
    agent = None  # Initialize as None, will be set in streamlit_UI
except Exception as e:
    st.error(f"Error loading the agent with tools : {e}")

# Likewise unused: kept for compatibility with the run_agent code path.
agent_executor = None  # Initialize as None, will be set in streamlit_UI

def format_flight_results(flights):
    """
    Format flight-offer dicts (Amadeus response shape) into readable text.

    Args:
        flights (list): Flight offers; each carries 'itineraries' (with
            'segments') and 'price'. May be empty or None.

    Returns:
        str: One summary block per flight joined by newlines, or a
             "no flights" message when the input is empty/None.
    """
    if not flights:
        return "No flights found for the specified criteria."

    formatted_results = []
    for flight in flights:
        # Only the first itinerary is summarised (outbound leg).
        segments = flight['itineraries'][0]['segments']

        # Overall journey: departure of the first segment, arrival of the last.
        departure = segments[0]['departure']
        arrival = segments[-1]['arrival']

        # Carrier of the first segment.
        airline = segments[0]['carrierCode']

        price = flight['price']['total']
        currency = flight['price']['currency']

        # dict.get replaces the `'terminal' in d` ternaries — 'terminal'
        # is an optional field in the response.
        flight_summary = f"""
Flight: {airline} {segments[0]['number']}
From: {departure['iataCode']} ({departure.get('terminal', 'N/A')})
To: {arrival['iataCode']} ({arrival.get('terminal', 'N/A')})
Departure: {departure['at']}
Arrival: {arrival['at']}
Price: {price} {currency}
"""
        formatted_results.append(flight_summary)

    return "\n".join(formatted_results)

def _llm_text(response):
    """Extract plain text from an LLM response (chat message or raw string)."""
    return response.content if hasattr(response, 'content') else str(response)


def _parse_tool_decision(decision):
    """Parse 'TOOL: name' / 'INPUT: value' lines from the LLM's decision text.

    Returns (tool_name, tool_input); either may be None if absent.
    """
    tool_name = None
    tool_input = None
    for line in decision.strip().split('\n'):
        # Slice off the prefix instead of str.replace, which would also
        # rewrite matches elsewhere in the line.
        if line.startswith('TOOL:'):
            # Strip surrounding quotes: models often echo the quoted tool
            # names shown in the prompt (e.g. "Weather Information").
            tool_name = line[len('TOOL:'):].strip().strip('"')
        elif line.startswith('INPUT:'):
            tool_input = line[len('INPUT:'):].strip()
    return tool_name, tool_input


def _invoke_tool(tool, tool_input):
    """Call a tool regardless of which LangChain tool flavour it is."""
    if hasattr(tool, 'func'):
        # Standard Tool wrapping a plain callable.
        return tool.func(tool_input)
    if hasattr(tool, '_run'):
        # BaseTool subclass exposing _run.
        return tool._run(tool_input)
    if hasattr(tool, 'run'):
        return tool.run(tool_input)
    # Fall back to calling the object directly.
    return tool(tool_input)


def simple_agent_execute(llm, tools, question):
    """
    Single-shot tool-using agent: ask the LLM to pick (at most) one tool,
    run it, then ask the LLM to compose a final answer from the tool output.
    Avoids the output-parsing fragility of the ReAct agent.

    Args:
        llm: LLM client exposing .invoke(prompt).
        tools (list): Tool objects with .name/.description and a callable.
        question (str): The user's question.

    Returns:
        dict: Keys 'tool_used', 'tool_input', 'tool_result', 'final_answer'.
              'none' sentinels when no tool was used; 'error' on failure.
    """
    try:
        tool_map = {tool.name: tool for tool in tools}
        tool_names = ", ".join([tool.name for tool in tools])

        decision_prompt = f"""
You are a travel agent assistant. You have access to the following tools:
{tool_names}

For the user question: "{question}"

Decide which tool to use and provide the input. Respond in this exact format:
TOOL: [tool_name]
INPUT: [tool_input]

If no tool is needed, respond with:
TOOL: none
INPUT: none

Available tools:
- "Weather Information": for weather queries (input: city name)
- "Google Search tool": for general information and flight prices (input: search query)
- "closest_airport": for finding airports (input: city name or location)
- "single_flight_search": for flight searches using Amadeus API

For flight searches:
- First use "closest_airport" to find airport codes for cities
- Then use "single_flight_search" with proper parameters

For flight price research, you can also use "Google Search tool" with queries like:
"cheapest flights San Francisco to Paris February 2024"

Question: {question}
"""

        decision = _llm_text(llm.invoke(decision_prompt))
        tool_name, tool_input = _parse_tool_decision(decision)

        if tool_name and tool_name != 'none' and tool_name in tool_map:
            try:
                tool_result = _invoke_tool(tool_map[tool_name], tool_input)
            except Exception as tool_error:
                # Tool failures become part of the context rather than aborting.
                tool_result = f"Error executing tool {tool_name}: {str(tool_error)}"

            # Second LLM pass: turn the raw tool output into a user-facing answer.
            final_prompt = f"""
Based on the tool result below, provide a helpful and detailed answer to the user's question.

User Question: {question}
Tool Used: {tool_name}
Tool Result: {tool_result}

Provide a comprehensive answer:
"""
            final_answer = _llm_text(llm.invoke(final_prompt))

            return {
                'tool_used': tool_name,
                'tool_input': tool_input,
                'tool_result': tool_result,
                'final_answer': final_answer
            }

        # No tool selected (or unknown tool name): answer directly.
        direct_prompt = f"""
Answer the following travel-related question directly:

{question}
"""
        direct_answer = _llm_text(llm.invoke(direct_prompt))

        return {
            'tool_used': 'none',
            'tool_input': 'none',
            'tool_result': 'none',
            'final_answer': direct_answer
        }

    except Exception as e:
        return {
            'tool_used': 'error',
            'tool_input': 'error',
            'tool_result': str(e),
            'final_answer': f"Sorry, I encountered an error: {str(e)}"
        }

# Entry point: render the app (the module runs top-to-bottom under `streamlit run`).
streamlit_UI()
