# -*- coding: utf-8 -*-
# @Author: Tim Liu
# @Date: 2024-05-07
# @Last Modified by: Tim Liu
# @Last Modified time: 2024-05-07

# @Description: chat openai service (openai & azure openai)

import os
from typing import Optional

#import openai
from langchain_openai import AzureChatOpenAI

from config.settings import *

class ChatOpenAIService(object):
    """
    Azure OpenAI & OpenAI Chat Service, depends on the openai & azure openai APIs.

    About Azure OpenAI: https://learn.microsoft.com/en-us/azure/ai-services/openai/overview
    """

    def __init__(self, callbacks):
        """
        Args:
            callbacks: LangChain callback handlers attached to the streaming
                GPT35 client (e.g. for token-by-token streaming).
        """
        # reserved for per-user context; never assigned elsewhere in this class
        self.user = None
        self.callbacks = callbacks

    def init_config(self) -> None:
        """
        Export the default (GPT35) Azure OpenAI settings into the process
        environment, where the AzureChatOpenAI client reads them.

        Must run before get_azure_llm(), which reads
        AZURE_OPENAI_API_VERSION from os.environ.
        """
        os.environ["AZURE_OPENAI_API_KEY"] = OPENAI_CHAT_API_KEY
        os.environ["AZURE_OPENAI_ENDPOINT"] = OPENAI_CHAT_API_BASE
        os.environ["AZURE_OPENAI_API_VERSION"] = OPENAI_CHAT_API_VERSION
        os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"] = GPT35_DEPLOYMENT_ENGINE

    def get_azure_llm(self, deployment_name: str, temperature: float):
        """
        Build a streaming AzureChatOpenAI client for the given deployment.

        Args:
            deployment_name (str): Azure deployment name to target.
            temperature (float): sampling temperature.

        Returns:
            AzureChatOpenAI: streaming client wired to self.callbacks.

        Raises:
            KeyError: if AZURE_OPENAI_API_VERSION is not set in the
                environment (call init_config() first).
        """
        return AzureChatOpenAI(
            azure_deployment=deployment_name,
            openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],
            streaming=True,
            callbacks=self.callbacks,
            temperature=temperature,
            verbose=True,
        )

    def get_azure_llm_deployment(self, deployment_id: Optional[str] = None,
                                 temperature: float = CHAT_TEMPERATURE):
        """
        Get the Azure OpenAI LLM according to the deployment id; only a
        limited set of deployments exists: GPT35-Turbo, GPT4-Turbo and GPT4o.

        Args:
            deployment_id (Optional[str]): deployment id of the Azure OpenAI
                service; "GPT4o" selects the GPT-4o deployment, any other
                value (including None) falls back to GPT35-Turbo.
            temperature (float): sampling temperature
                (defaults to CHAT_TEMPERATURE).

        Returns:
            AzureChatOpenAI: the Azure OpenAI LLM.
        """
        # add more deployments and switch by deployment_id
        if deployment_id == "GPT4o":
            # GPT4o has its own endpoint/key/version, passed explicitly
            # instead of via environment variables.
            # NOTE(review): unlike the GPT35 path, this client is built
            # without streaming/callbacks/verbose — confirm intentional.
            return AzureChatOpenAI(
                openai_api_version=GPT4o_API_VERSION,
                azure_deployment=GPT4o_AZURE_DEPLOYMENT,
                temperature=temperature,
                azure_endpoint=GPT4o_API_BASE,
                openai_api_key=GPT4o_API_KEY,
            )

        # default to GPT35-Turbo: publish its settings to the environment,
        # then build the streaming client
        self.init_config()
        return self.get_azure_llm(GPT35_DEPLOYMENT_ENGINE, temperature)
