from abc import ABC, abstractmethod
from typing import Dict, List, Any
import pandas as pd
from dataclasses import dataclass
import requests
import json
from chroma_manager import ChromaManager
from datetime import datetime
import re
import plotly
import plotly.express as px
import plotly.graph_objects as go
from openai import OpenAI
from typing import Dict, List, Any, Optional
import logging

@dataclass
class QueryResult:
    """Vector-search context used to build the KPI prompt.

    Each attribute is a pre-formatted text block describing what the backend
    exposes to the LLM.
    """
    # Text listing of the available indicator codes/descriptions.
    indicators: str
    # Text listing of the available dimension codes/descriptions.
    dimensions: str
    # Optional theme context. BUG FIX: LangflowModelService.generate_query_params
    # reads query_result.themes, but this field was missing, raising
    # AttributeError. Added with a default so existing two-arg callers still work.
    themes: str = ""

class ModelService(ABC):
    """Abstract base for LLM-backed services that turn a user question into
    query parameters and Plotly visualization code.

    Concrete subclasses implement the transport to a specific model backend
    (Ollama, Langflow, DeepSeek); shared prompt construction and response
    post-processing live here.
    """

    @abstractmethod
    def generate_query_params(self, question: str, query_result: "QueryResult") -> str:
        """Generate query parameters based on user question and vector search results"""
        pass

    @abstractmethod
    def generate_plot_code(self, question: str, query_params: str, df_metadata: str) -> str:
        """Generate Plotly visualization code"""
        pass

    @abstractmethod
    def submit_prompt(self, prompt, **kwargs) -> str:
        """Submit prompt to the model and return the response"""
        pass

    # NOTE(review): the original also declared an @abstractmethod extract_json
    # here, but the concrete def extract_json below overrides it in the same
    # class namespace, so it was never actually abstract. The dead duplicate
    # has been removed; the effective interface is unchanged.

    def _get_kpi_prompt(self, question: str, query_result: "QueryResult") -> str:
        """Build the (Chinese-language) prompt asking the LLM to emit the KPI
        query parameters as a JSON object (startDate/endDate/kpis/dims/basis).

        Args:
            question: The end user's natural-language question.
            query_result: Vector-search context listing available indicators
                and dimensions.

        Returns:
            The full prompt string, including the current timestamp and a
            worked JSON example.
        """
        json_example = '''
{
    "startDate": "2023-11-01",
    "endDate": "2023-11-30",
    "kpis": "S0010002 as 住院量",
    "dims": "D001 -> Desc as 病区",
    "basis": "挂号日期"
}'''
        return f"""
你是一位数据分析助手，帮助用户生成查询接口的参数。  
用户问题是：“{question}”。  

附加信息：
1. 当前时间是：{datetime.now().strftime("%Y-%m-%d %H:%M:%S")}。
2. 用户问题中“按照xxx时间或日期统计”一般指的是**统计口径**，应识别为 `basis` 字段；“按xxx展示”指的是**维度**，应识别为 `dims` 字段。
3. 时间范围说明：
    “去年”指的是上一自然年的时间范围，例如当前时间是 2024 年，则“去年”指 2023-01-01 到 2023-12-31。
    “上个月”指的是上一自然月，例如当前时间是 2024-06-15，则“上个月”指 2024-05-01 到 2024-05-31。
    “本月”指的是当前自然月，例如当前时间是 2024-06-15，则“本月”指 2024-06-01 到 2024-06-30。

后台系统中，你可以使用的指标和维度如下：  
(指标描述中括号内的是指标的统计口径)
{query_result.indicators}

{query_result.dimensions}

根据用户的问题，生成以下 JSON 参数：  
1. 时间范围（startDate 和 endDate）：推断问题中提到的时间范围。  
2. kpis：选择合适的指标代码，格式为“指标代码 as 别名”  
3. dims：选择合适的维度代码，格式为“维度代码 -> Desc as 别名”。  
4. basis：统计口径。  

请严格按照以下要求生成输出：  
1. 只输出 JSON，禁止附加任何其他内容（如文字说明）。  
2. 输出必须是有效的 JSON 格式。  
3. 输出中指标和维度必须基于相同的主题，且指标和维度要能通过主题关联上。
4. 如果多个指标或维度符合语义，选择出最符合用户提问的项。
5. 如果从现有信息中无法确定JSON中的参数，将该参数输出为空即可。
6. 指标和维度别名中不要出现括号，如“住院量（人次）”应该写成“住院量”。

示例：  
输入问题：“统计上个月的住院量按病区分类”  
输出：  
```json
{json_example}
"""

    def extract_json(self, llm_response: str) -> str:
        """Extract the JSON payload from a fenced ```json block in an LLM reply.

        Returns the captured block content (with any stray ``` removed), or
        the raw response unchanged when no fenced block is found.
        """
        # Remove ollama-generated extra characters
        llm_response = llm_response.replace("\\_", "_")
        llm_response = llm_response.replace("\\", "")

        # Find '```json' and capture up to a ';', '[' or closing '```'.
        # BUG FIX: the local variable was named `json`, shadowing the imported
        # json module; renamed to `match`.
        match = re.search(r"```json\n((.|\n)*?)(?=;|\[|```)", llm_response, re.DOTALL)

        if match:
            return match.group(1).replace("```", "")
        else:
            return llm_response

    def get_plotly_figure(
        self, plotly_code: str, df: pd.DataFrame, dark_mode: bool = True
    ) -> "plotly.graph_objs.Figure":
        """
        **Example:**
        ```python
        fig = vn.get_plotly_figure(
            plotly_code="fig = px.bar(df, x='name', y='salary')",
            df=df
        )
        fig.show()
        ```
        Get a Plotly figure from a dataframe and Plotly code.

        Args:
            df (pd.DataFrame): The dataframe to use.
            plotly_code (str): The Plotly code to use.
            dark_mode (bool): Apply the "plotly_dark" template when True.

        Returns:
            plotly.graph_objs.Figure: The Plotly figure, or None when the
            executed code did not define a `fig` variable.
        """
        ldict = {"df": df, "px": px, "go": go}
        try:
            # SECURITY: executes LLM-generated code. Only run code produced by
            # a trusted model pipeline; do not feed untrusted input here.
            exec(plotly_code, globals(), ldict)

            fig = ldict.get("fig", None)
        except Exception:
            # Generated code failed: fall back to a heuristic chart choice
            # based on the dataframe's column types.
            numeric_cols = df.select_dtypes(include=["number"]).columns.tolist()
            categorical_cols = df.select_dtypes(
                include=["object", "category"]
            ).columns.tolist()

            # Decision-making for plot type
            if len(numeric_cols) >= 2:
                # Use the first two numeric columns for a scatter plot
                fig = px.scatter(df, x=numeric_cols[0], y=numeric_cols[1])
            elif len(numeric_cols) == 1 and len(categorical_cols) >= 1:
                # Use a bar plot if there's one numeric and one categorical column
                fig = px.bar(df, x=categorical_cols[0], y=numeric_cols[0])
            elif len(categorical_cols) >= 1 and df[categorical_cols[0]].nunique() < 10:
                # Use a pie chart for categorical data with fewer unique values
                fig = px.pie(df, names=categorical_cols[0])
            else:
                # Default to a simple line plot if above conditions are not met
                fig = px.line(df)

        if fig is None:
            return None

        if dark_mode:
            fig.update_layout(template="plotly_dark")

        return fig

    def _extract_python_code(self, markdown_string: str) -> str:
        """Return the first fenced code block from a markdown string.

        Prefers ```python blocks; falls back to any ``` block; returns the
        input unchanged when no fenced block exists.
        """
        # Regex pattern to match Python code blocks
        pattern = r"```[\w\s]*python\n([\s\S]*?)```|```([\s\S]*?)```"

        # Find all matches in the markdown string
        matches = re.findall(pattern, markdown_string, re.IGNORECASE)

        # Extract the Python code from the matches
        python_code = []
        for match in matches:
            python = match[0] if match[0] else match[1]
            python_code.append(python.strip())

        if len(python_code) == 0:
            return markdown_string

        return python_code[0]

    def _sanitize_plotly_code(self, raw_plotly_code: str) -> str:
        """Strip fig.show() so the generated code only builds the figure."""
        plotly_code = raw_plotly_code.replace("fig.show()", "")

        return plotly_code

    def log(self, message: str, title: str = "Info"):
        """Print a titled message to stdout."""
        print(f"{title}: {message}")

class OllamaModelService(ModelService):
    """ModelService backed by a local Ollama server's /api/chat endpoint."""

    def __init__(self, model_name: str, base_url: str):
        # Model tag known to the Ollama server, e.g. "llama2".
        self.model_name = model_name
        # Server root, e.g. "http://localhost:11434".
        self.base_url = base_url

    def generate_query_params(self, question: str, query_result: "QueryResult") -> str:
        """Prompt the model with the KPI prompt and return the extracted JSON string."""
        content = self._get_kpi_prompt(question, query_result)
        self.log(title="Prompt Content", message=content)
        prompt = [
            {"role": "system", "content": "你是一位数据分析助手，帮助用户生成查询接口的参数。"},
            {"role": "user", "content": content}
        ]
        self.log(title="Params Prompt", message=prompt)
        response = self.submit_prompt(prompt)
        self.log(title="LLM Response", message=response)

        return self.extract_json(response)

    def generate_plot_code(self, question: Optional[str] = None, query_params: Optional[str] = None,
                           df_metadata: Optional[str] = None) -> str:
        """Ask the model for Plotly code charting a dataframe described by df_metadata.

        Returns:
            Python code (fences and fig.show() stripped).
        """
        if question is not None:
            system_msg = f"The following is a pandas DataFrame that contains the results of the query that answers the question the user asked: '{question}'"
        else:
            system_msg = "The following is a pandas DataFrame "

        if query_params is not None:
            system_msg += f"\n\nThe DataFrame was produced using this params: {query_params}\n\n"

        system_msg += f"The following is information about the resulting pandas DataFrame 'df': \n{df_metadata}"

        prompt = [
            {"role": "system", "content": system_msg},
            {"role": "user", "content": "Can you generate the Python plotly code to chart the results of the dataframe? Assume the data is in a pandas dataframe called 'df'. If there is only one value in the dataframe, use an Indicator. Respond with only Python code. Do not answer with any explanations -- just the code."}
        ]

        self.log(title="Plotly Prompt", message=prompt)
        plotly_code = self.submit_prompt(prompt)
        self.log(title="LLM Response", message=plotly_code)

        return self._sanitize_plotly_code(self._extract_python_code(plotly_code))

    def submit_prompt(self, prompt, **kwargs) -> str:
        """Send a chat-style message list to Ollama and return the reply text.

        Args:
            prompt: List of {"role": ..., "content": ...} message dicts.
            **kwargs: Optional ``timeout`` (seconds) forwarded to requests.post;
                defaults to None (no timeout), matching the previous behavior.

        Raises:
            Exception: When the API responds with a non-200 status.
        """
        response = requests.post(
            f"{self.base_url}/api/chat",
            json={
                "model": self.model_name,
                "messages": prompt,
                "stream": False
            },
            timeout=kwargs.get("timeout")
        )

        if response.status_code != 200:
            raise Exception(f"API call failed: {response.text}")

        return response.json()['message']['content']

class LangflowModelService(ModelService):
    """ModelService that delegates prompt handling to a remote Langflow flow over HTTP."""

    def __init__(self, flow_id: str, base_url: str):
        # Identifier of the Langflow flow to call.
        self.flow_id = flow_id
        # Langflow server root, e.g. "http://localhost:7860".
        self.base_url = base_url

    def generate_query_params(self, question: str, query_result: "QueryResult") -> Dict:
        """POST the question plus vector-search context to the flow; return its JSON params."""
        response = requests.post(
            f"{self.base_url}/api/v1/flows/{self.flow_id}/query_params",
            json={
                "question": question,
                "indicators": query_result.indicators,
                "dimensions": query_result.dimensions,
                # BUG FIX: QueryResult did not define `themes`, so attribute
                # access raised AttributeError. Fall back to "" when absent.
                "themes": getattr(query_result, "themes", "")
            }
        )
        # Fail fast on HTTP errors instead of parsing an error body as JSON.
        response.raise_for_status()
        return response.json()

    def generate_plot_code(self, question: str, query_params: Dict, df_metadata: Dict) -> str:
        """POST question/params/metadata to the flow; return the generated Plotly code."""
        response = requests.post(
            f"{self.base_url}/api/v1/flows/{self.flow_id}/plot_code",
            json={
                "question": question,
                "query_params": query_params,
                "df_metadata": df_metadata
            }
        )
        response.raise_for_status()
        return response.json()["plot_code"]

    def submit_prompt(self, prompt, **kwargs) -> str:
        """Raw prompt submission is handled server-side by the flow and is unsupported here.

        BUG FIX: ModelService declares submit_prompt abstract; without this
        concrete override the class could not be instantiated at all.
        """
        raise NotImplementedError("LangflowModelService does not support raw prompt submission")

class DeepSeekModelService(ModelService):
    """ModelService backed by DeepSeek's OpenAI-compatible chat completion API."""

    def __init__(self, api_key: str, base_url: str = "https://api.deepseek.com"):
        # OpenAI SDK client pointed at the DeepSeek endpoint.
        self.client = OpenAI(api_key=api_key, base_url=base_url)
        self.logger = logging.getLogger(__name__)

    def submit_prompt(self, prompt, **kwargs) -> str:
        """Submit a chat message list to the "deepseek-chat" model and return the reply text.

        Raises:
            Exception: Re-raises any API/client error after logging it.
        """
        try:
            response = self.client.chat.completions.create(
                model="deepseek-chat",
                messages=prompt,
                stream=False
            )
            return response.choices[0].message.content
        except Exception as e:
            self.logger.error(f"DeepSeek API error: {str(e)}")
            raise

    def _get_chat_completion(self, system_prompt: str, user_prompt: str) -> str:
        """Convenience wrapper: build system/user chat messages and submit them."""
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt}
        ]
        return self.submit_prompt(messages)

    def generate_query_params(self, question: str, query_result: "QueryResult") -> str:
        """Prompt the model with the KPI prompt and return the extracted JSON string."""
        content = self._get_kpi_prompt(question, query_result)
        self.log(title="Prompt Content", message=content)
        prompt = [
            {"role": "system", "content": "你是一位数据分析助手，帮助用户生成查询接口的参数。"},
            {"role": "user", "content": content}
        ]
        self.log(title="Params Prompt", message=prompt)
        response = self.submit_prompt(prompt)
        self.log(title="LLM Response", message=response)

        return self.extract_json(response)

    def generate_plot_code(self, question: str, query_params: str, df_metadata: str) -> str:
        """Ask the model for Plotly code charting a dataframe described by df_metadata.

        Returns:
            Python code (fences and fig.show() stripped).
        """
        if question is not None:
            system_msg = f"The following is a pandas DataFrame that contains the results of the query that answers the question the user asked: '{question}'"
        else:
            system_msg = "The following is a pandas DataFrame "

        if query_params is not None:
            system_msg += f"\n\nThe DataFrame was produced using this params: {query_params}\n\n"

        system_msg += f"The following is information about the resulting pandas DataFrame 'df': \n{df_metadata}"

        prompt = [
            {"role": "system", "content": system_msg},
            {"role": "user", "content": "Can you generate the Python plotly code to chart the results of the dataframe? Assume the data is in a pandas dataframe called 'df'. If there is only one value in the dataframe, use an Indicator. Respond with only Python code. Do not answer with any explanations -- just the code."}
        ]

        self.log(title="Plotly Prompt", message=prompt)
        plotly_code = self.submit_prompt(prompt)
        self.log(title="LLM Response", message=plotly_code)

        return self._sanitize_plotly_code(self._extract_python_code(plotly_code))

def create_model_service(service_type: str, **kwargs) -> "ModelService":
    """Factory method to create a model service instance.

    Args:
        service_type: One of "ollama", "langflow", "deepseek".
        **kwargs: Backend-specific settings — model_name/base_url (ollama),
            flow_id/base_url (langflow), api_key/base_url (deepseek).

    Returns:
        A configured ModelService subclass instance.

    Raises:
        ValueError: For an unknown service_type, or when the deepseek
            service is requested without an api_key.
        KeyError: When the langflow service is requested without a flow_id.
    """
    if service_type == "ollama":
        return OllamaModelService(
            model_name=kwargs.get("model_name", "llama2"),
            base_url=kwargs.get("base_url", "http://localhost:11434")
        )
    elif service_type == "langflow":
        return LangflowModelService(
            flow_id=kwargs["flow_id"],
            base_url=kwargs.get("base_url", "http://localhost:7860")
        )
    elif service_type == "deepseek":
        api_key = kwargs.get("api_key")
        if api_key is None:
            # BUG FIX: previously api_key=None was silently forwarded to the
            # OpenAI client; fail fast with a clear configuration error instead.
            raise ValueError("deepseek service requires an 'api_key'")
        return DeepSeekModelService(
            api_key=api_key,
            base_url=kwargs.get("base_url", "https://api.deepseek.com")
        )
    raise ValueError(f"Unknown service type: {service_type}")