from flask import jsonify
import json
from typing import List, Dict, Any, Optional
from datetime import datetime
import yaml
import os
from .ollama_service import OllamaService
from .chroma_service import ChromaService
from .metrics_service import MetricsService
from config import MODEL_NAME, INDICATORS_COLLECTION, DIMENSIONS_COLLECTION, MDR_CONFIG
import pandas as pd
import re
import plotly
import plotly.express as px
import plotly.graph_objects as go
from .text2sql_agent_service import Text2SQLAgentService

class IndicatorAgentService:
    """Agent service that turns natural-language indicator queries into
    metric lookups and Plotly visualizations.

    Pipeline: parse query -> match indicators/dimensions -> build filter
    conditions -> build query params -> fetch data from the metrics backend ->
    generate and execute chart code. When no indicator matches, the query is
    delegated to a text2sql agent instead.
    """

    def __init__(self):
        """Wire up the LLM, vector store, metrics backend and fallback agent."""
        # LLM client used for every prompt-driven step.
        self.ollama = OllamaService()
        # Vector store holding indicator and dimension records.
        self.chroma = ChromaService()
        # Metrics backend client; base URL comes from MDR_CONFIG.
        self.metrics = MetricsService(base_url=MDR_CONFIG['base_url'])
        self.model_name = MODEL_NAME
        # Prompt templates loaded from config/prompts.yaml.
        self.prompts = self._load_prompts()
        # Fallback agent used when no indicator can be matched.
        self.text2sql = Text2SQLAgentService()

    def _load_prompts(self) -> Dict[str, Dict[str, str]]:
        """Load the prompt template configuration from ``config/prompts.yaml``."""
        prompts_path = os.path.join('config', 'prompts.yaml')
        with open(prompts_path, 'r', encoding='utf-8') as stream:
            templates = yaml.safe_load(stream)
        return templates

    def parse_user_query(self, query: str) -> Dict[str, Any]:
        """Parse the user's query into indicators, dimensions and filters.

        Returns a dict with 'indicators', 'dimensions' and 'filters' lists;
        all three are empty when parsing fails or no indicator is identified.
        """
        fallback = {"indicators": [], "dimensions": [], "filters": []}
        try:
            raw_reply = self.ollama.chat(
                self.prompts['parse_query']['system'],
                query,
                self.model_name
            )
            if not raw_reply:
                print("Warning: LLM returned empty response")
                return fallback

            parsed = self._parse_llm_response(raw_reply, fallback)
            if parsed.get('indicators'):
                return parsed

            print("Warning: No indicators identified")
            return fallback

        except Exception as e:
            print(f"Error parsing user query: {str(e)}")
            return fallback

    def find_matching_indicators(self, indicator_desc: List[str]) -> Dict[str, Any]:
        """Retrieve candidate indicators from the vector store and let the LLM
        pick the ones that match the given descriptions.

        Returns:
            Dict with 'indicators', 'themes' and 'status'; on failure the lists
            are empty and a 'message' field describes the error.
        """
        try:
            if not indicator_desc:
                raise ValueError("指标描述不能为空")

            # Hybrid (vector + keyword) search over the indicators collection.
            search = self.chroma.list_records(
                collection_name=INDICATORS_COLLECTION,
                page=1,
                page_size=10,
                search_type='hybrid',
                record_id=None,
                query_text=" ".join(indicator_desc),
                metadata_filter=None,
                document_filter=None,
                n_results_requested=10
            )
            candidates = search.get('records')
            if not candidates:
                raise ValueError("未找到匹配的指标记录")

            # Ask the LLM to select the actual matches among the candidates.
            llm_reply = self.ollama.chat(
                self.prompts['match_indicators']['system'],
                self.prompts['match_indicators']['user'].format(
                    query_indicators=indicator_desc,
                    indicators=json.dumps(candidates, ensure_ascii=False, indent=2)
                ),
                self.model_name
            )
            matched = self._parse_llm_response(llm_reply, [])

            # Collect the distinct themes carried by the matched indicators.
            themes = {item['theme'] for item in matched if 'theme' in item}

            return {
                'indicators': matched,
                'themes': list(themes),
                'status': 'success'
            }
        except Exception as e:
            return {
                'indicators': [],
                'themes': [],
                'status': 'error',
                'message': f"查找匹配指标时发生错误: {str(e)}"
            }

    def find_matching_dimensions(self, dimension_desc: List[str], themes: List[str]) -> List[Dict[str, Any]]:
        """Find dimensions matching the descriptions, restricted to dimensions
        shared by ALL given themes.

        Args:
            dimension_desc: user-provided dimension descriptions.
            themes: themes of the matched indicators; must be non-empty.

        Returns:
            The LLM-selected matching dimensions, or [] on failure.
        """
        try:
            # Without themes there is nothing to intersect over; the previous
            # implementation hit an IndexError below and silently returned [].
            if not themes:
                return []

            # Fetch candidate dimension records per theme, keyed by dimension code.
            theme_dimensions = {}
            for theme in themes:
                results = self.chroma.list_records(
                    collection_name=DIMENSIONS_COLLECTION,
                    page=1,
                    page_size=10,
                    search_type='hybrid',
                    record_id=None,
                    query_text=" ".join(dimension_desc),
                    metadata_filter={"theme": theme},
                    document_filter=None,
                    n_results_requested=10
                )
                theme_dimensions[theme] = {}
                for idx, record in enumerate(results.get('records', [])):
                    doc = record.get('document', '{}')
                    if isinstance(doc, str):
                        doc = json.loads(doc)
                    # '维度代码' is the dimension code; fall back to record id/index.
                    dimension_code = doc.get('维度代码', record.get('id', str(idx)))
                    theme_dimensions[theme][dimension_code] = doc

            # Intersect the dimension codes across all themes.
            common_dimension_codes = set(theme_dimensions[themes[0]].keys())
            for theme in themes[1:]:
                common_dimension_codes &= set(theme_dimensions[theme].keys())

            # Materialize the shared dimensions (records taken from the first theme).
            common_dimensions = [
                theme_dimensions[themes[0]][code]
                for code in common_dimension_codes
            ]

            # Let the LLM pick the dimensions that match the user's wording.
            response = self.ollama.chat(
                self.prompts['match_dimensions']['system'],
                self.prompts['match_dimensions']['user'].format(
                    query_dimensions=json.dumps(dimension_desc, ensure_ascii=False),
                    themes=json.dumps(themes, ensure_ascii=False),
                    common_dimensions=json.dumps(common_dimensions, ensure_ascii=False, indent=2)
                ),
                self.model_name
            )
            return self._parse_llm_response(response, [])
        except Exception as e:
            # Log instead of swallowing silently so failures are diagnosable.
            print(f"查找匹配维度时发生错误: {str(e)}")
            return []

    def generate_filter_conditions(self, filters: List[Dict], themes: List[str]) -> List[Dict[str, Any]]:
        """Generate filter conditions restricted to fields shared by all themes.

        Args:
            filters: parsed filter descriptions; each item carries a 'field' key.
            themes: themes of the matched indicators; must be non-empty.

        Returns:
            List[Dict[str, Any]]: the LLM-generated filter conditions, or []
            on failure.
        """
        try:
            # Without themes there is no field set to intersect; the previous
            # implementation raised an IndexError below and silently returned [].
            if not themes:
                return []

            filter_desc = [item['field'] for item in filters]

            # Collect candidate filter fields per theme, keyed by field code.
            theme_filter_fields = {}
            for theme in themes:
                results = self.chroma.list_records(
                    collection_name=DIMENSIONS_COLLECTION,
                    page=1,
                    page_size=10,
                    search_type='hybrid',
                    record_id=None,
                    query_text=" ".join(filter_desc),
                    metadata_filter={"theme": theme},
                    document_filter=None,
                    n_results_requested=10
                )
                theme_filter_fields[theme] = {}
                for idx, record in enumerate(results.get('records', [])):
                    doc = record.get('document', '{}')
                    if isinstance(doc, str):
                        doc = json.loads(doc)
                    # '维度代码' = field code, '维度描述' = human-readable name.
                    field_code = doc.get('维度代码', record.get('id', str(idx)))
                    theme_filter_fields[theme][field_code] = {
                        '字段代码': doc.get('维度代码'),
                        '字段名称': doc.get('维度描述', '')
                    }

            # Intersect field codes across all themes.
            common_fields_codes = set(theme_filter_fields[themes[0]].keys())
            for theme in themes[1:]:
                common_fields_codes &= set(theme_filter_fields[theme].keys())

            common_filter_fields = [
                theme_filter_fields[themes[0]][code]
                for code in common_fields_codes
            ]

            print(f"Common filter fields: {json.dumps(common_filter_fields, ensure_ascii=False, indent=2)}")

            # Let the LLM translate the user's filters onto the available fields.
            response = self.ollama.chat(
                self.prompts['generate_filters']['system'],
                self.prompts['generate_filters']['user'].format(
                    query_filters=json.dumps(filters, ensure_ascii=False, indent=2),
                    available_fields=json.dumps(common_filter_fields, ensure_ascii=False, indent=2)
                ),
                self.model_name
            )
            return self._parse_llm_response(response, [])
        except Exception as e:
            # Log instead of swallowing silently so failures are diagnosable.
            print(f"生成过滤条件时发生错误: {str(e)}")
            return []

    def query_indicator_data(self, params: Dict[str, Any]) -> Optional[pd.DataFrame]:
        """Call the metrics service and normalize the result into a DataFrame.

        Args:
            params: query parameters. 'kpis' is required; 'startDate',
                'endDate' and 'dims' are optional.

        Returns:
            The cleaned result DataFrame, or None when the query fails.

        Example:
            >>> params = {
            ...     "startDate": "2024-02-01",
            ...     "endDate": "2024-02-26",
            ...     "kpis": "revenue as 收入",
            ...     "dims": "date as 日期, dept as 科室"
            ... }
            >>> df = query_indicator_data(params)
        """
        try:
            if not params:
                raise ValueError("查询参数不能为空")

            # Validate required parameters.
            required_params = ['kpis']
            missing_params = [p for p in required_params if not params.get(p)]
            if missing_params:
                raise ValueError(f"缺少必要参数: {', '.join(missing_params)}")

            # Fetch the raw payload from the metrics backend.
            raw = self.metrics.get_kpis(params)

            if raw is None or (isinstance(raw, (list, dict)) and not raw):
                raise ValueError("查询未返回任何数据")

            # Coerce the payload into a DataFrame.
            try:
                if isinstance(raw, pd.DataFrame):
                    frame = raw
                elif isinstance(raw, dict):
                    frame = pd.DataFrame.from_dict(raw)
                elif isinstance(raw, list):
                    frame = pd.DataFrame(raw)
                else:
                    raise ValueError(f"不支持的数据格式: {type(raw)}")
            except Exception as e:
                raise ValueError(f"数据格式转换失败: {str(e)}")

            # Basic clean-up: blank out missing values.
            frame = frame.fillna('')

            # Render datetime columns as YYYY-MM-DD strings.
            for col in frame.select_dtypes(include=['datetime64']).columns:
                frame[col] = frame[col].dt.strftime('%Y-%m-%d')

            return frame

        except ValueError as e:
            print(f"指标查询错误: {str(e)}")
            return None
        except Exception as e:
            print(f"查询指标数据时发生未知错误: {str(e)}")
            return None

    def generate_chart_code(self, data: Dict[str, Any]) -> str:
        """Generate chart code from the query result via the chart prompt."""
        payload = json.dumps(data)
        return self._call_ollama(self.prompts['generate_chart']['system'], payload)

    def _call_ollama(self, system_prompt: str, user_prompt: str) -> str:
        """Invoke the Ollama model (thin wrapper over OllamaService.chat)."""
        return self.ollama.chat(system_prompt, user_prompt, self.model_name)

    def _parse_llm_response(self, response: str, default_value: Any = None) -> Any:
        """Parse a JSON-formatted LLM response with progressive fallbacks.

        Args:
            response: the raw LLM response string.
            default_value: value returned when every parsing strategy fails.

        Returns:
            The parsed Python object, or ``default_value`` on failure.
        """
        try:
            if not response:
                raise ValueError("LLM返回空响应")

            # 1. Try parsing the response verbatim.
            try:
                return json.loads(response)
            except json.JSONDecodeError:
                pass

            # 2. Try extracting an embedded JSON object (spans newlines).
            # Note: this only finds objects; JSON arrays fall through to step 3.
            json_match = re.search(r'\{[\s\S]*\}', response)
            if json_match:
                try:
                    return json.loads(json_match.group())
                except json.JSONDecodeError:
                    pass

            # 3. Strip common markdown code-fence wrappers and retry.
            cleaned_response = response.strip()
            if cleaned_response.startswith('```json'):
                cleaned_response = cleaned_response[7:]
            elif cleaned_response.startswith('```'):
                # Bare fences were previously left in place, so fenced JSON
                # arrays could never be parsed.
                cleaned_response = cleaned_response[3:]
            if cleaned_response.endswith('```'):
                cleaned_response = cleaned_response[:-3]

            cleaned_response = cleaned_response.strip()

            try:
                return json.loads(cleaned_response)
            except json.JSONDecodeError as e:
                print(f"JSON parse error: {str(e)}")
                print(f"Raw response: {response}")
                return default_value

        except Exception as e:
            print(f"解析LLM响应时发生错误: {str(e)}")
            print(f"原始响应: {response}")
            return default_value
    def get_kpis(self, params):
        """Pass ``params`` straight through to the metrics service's get_kpis."""
        return self.metrics.get_kpis(params)

    def generate_query_json(self, matching_indicators: List[Dict],
                       dimension_ids: List[Dict],
                       filter_conditions: List[Dict]) -> Dict:
        """Build the metrics-query JSON from matched indicators and dimensions.

        The filter conditions are fed to a date-converter prompt that resolves
        relative dates (e.g. "上月") against today's date.

        Args:
            matching_indicators: dicts with 'code' and 'name' keys.
            dimension_ids: dicts with 'code' and 'name' keys.
            filter_conditions: generated filter conditions.

        Returns:
            Dict with 'startDate', 'endDate', 'kpis' and 'dims' strings.
        """
        context = self.prompts['date_converter']['user'].format(filter_conditions=json.dumps(filter_conditions, ensure_ascii=False))

        current_date = datetime.now().strftime('%Y-%m-%d')
        date_prompt = self.prompts['date_converter']['system'].format(
            current_date=current_date
        )

        date_response = self.ollama.chat(
            date_prompt,
            context,
            self.model_name
        )
        date_range = self._parse_llm_response(
            date_response,
            {"startDate": "", "endDate": ""}
        )
        # The default above only applies when parsing fails completely; a
        # successfully parsed but partial/non-dict reply previously raised an
        # uncaught KeyError/TypeError on the subscript below.
        if not isinstance(date_range, dict):
            date_range = {"startDate": "", "endDate": ""}

        # "code as name" pairs understood by the metrics backend.
        kpis = ", ".join([f"{ind['code']} as {ind['name']}"
                         for ind in matching_indicators])

        # Dimensions request the description column: "code -> Desc as name".
        dims = ", ".join([f"{dim['code']} -> Desc as {dim['name']}"
                         for dim in dimension_ids])

        return {
            "startDate": date_range.get("startDate", ""),
            "endDate": date_range.get("endDate", ""),
            "kpis": kpis,
            "dims": dims
        }
    def generate_plot_code(self, question: str = None, query_params: str = None, df_metadata: str = None) -> Dict[str, Any]:
        """Ask the LLM for Plotly code that visualizes the result DataFrame.

        Args:
            question: the original user question, if any.
            query_params: the params used to produce the DataFrame, if any.
            df_metadata: description of the DataFrame (required).

        Returns:
            Dict with 'status' and sanitized 'code' on success, or 'status'
            'error' plus a 'message' and code=None on failure.
        """
        try:
            if not df_metadata:
                raise ValueError("缺少数据框架元数据信息")

            if question is None:
                system_msg = "The following is a pandas DataFrame "
            else:
                system_msg = f"The following is a pandas DataFrame that contains the results of the query that answers the question the user asked: '{question}'"

            if query_params is not None:
                system_msg += f"\n\nThe DataFrame was produced using this params: {query_params}\n\n"

            system_msg += f"The following is information about the resulting pandas DataFrame 'df': \n{df_metadata}"

            # NOTE(review): the system turn uses the 'generate_chart' template
            # while the user turn reuses 'generate_plot' -> 'system'; looks
            # inconsistent — confirm against config/prompts.yaml.
            messages = [
                {"role": "system", "content": self.prompts['generate_chart']['system'].format(system_msg=system_msg)},
                {"role": "user", "content": self.prompts['generate_plot']['system']}
            ]

            raw_code = self.ollama.submit_prompt(messages, self.model_name)
            if not raw_code:
                raise ValueError("生成图表代码失败")

            extracted = self._extract_python_code(raw_code)
            return {
                'status': 'success',
                'code': self._sanitize_plotly_code(extracted)
            }

        except Exception as e:
            return {
                'status': 'error',
                'message': f"生成图表代码时发生错误: {str(e)}",
                'code': None
            }
    
    def _extract_python_code(self, markdown_string: str) -> str:
        """Extract the first fenced code block from a markdown string.

        Prefers ```python fences and falls back to anonymous ``` fences; when
        no fence is found, the input is returned unchanged.
        """
        # Bug fix: the second alternative previously used [\\s\S], which in a
        # raw string is a class of {backslash, 's', non-whitespace} and thus
        # never matched fences containing newlines; [\s\S] matches anything.
        pattern = r"```[\w\s]*python\n([\s\S]*?)```|```([\s\S]*?)```"

        # Each match is a (python_group, anonymous_group) tuple.
        matches = re.findall(pattern, markdown_string, re.IGNORECASE)

        snippets = [(tagged or bare).strip() for tagged, bare in matches]

        if not snippets:
            return markdown_string

        return snippets[0]
    
    def _sanitize_plotly_code(self, raw_plotly_code: str) -> str:
        """Remove fig.show() calls so the generated code can run headless."""
        sanitized = raw_plotly_code.replace("fig.show()", "")
        return sanitized
    
    def get_plotly_figure(self,
        plotly_code: str, df: pd.DataFrame, dark_mode: bool = True
    ) -> plotly.graph_objs.Figure:
        """Execute generated Plotly code against ``df`` and return the figure.

        If the generated code raises, a fallback chart is picked from the
        DataFrame's column types (scatter / bar / pie / line).

        **Example:**
        ```python
        fig = vn.get_plotly_figure(
            plotly_code="fig = px.bar(df, x='name', y='salary')",
            df=df
        )
        fig.show()
        ```

        Args:
            plotly_code (str): python source expected to assign a figure to ``fig``.
            df (pd.DataFrame): the dataframe exposed to the code as ``df``.
            dark_mode (bool): apply the plotly_dark template when True.

        Returns:
            plotly.graph_objs.Figure: the figure, or None if the code ran but
            defined no ``fig``.
        """
        exec_scope = {"df": df, "px": px, "go": go}
        try:
            # SECURITY: executes LLM-generated code in-process; only safe for
            # trusted deployments.
            exec(plotly_code, globals(), exec_scope)
            fig = exec_scope.get("fig", None)
        except Exception:
            # The generated code failed — choose a sensible default chart
            # based on the column dtypes instead.
            numeric_cols = df.select_dtypes(include=["number"]).columns.tolist()
            categorical_cols = df.select_dtypes(
                include=["object", "category"]
            ).columns.tolist()

            if len(numeric_cols) >= 2:
                # Two numeric columns -> scatter of the first pair.
                fig = px.scatter(df, x=numeric_cols[0], y=numeric_cols[1])
            elif len(numeric_cols) == 1 and len(categorical_cols) >= 1:
                # One numeric + one categorical column -> bar chart.
                fig = px.bar(df, x=categorical_cols[0], y=numeric_cols[0])
            elif len(categorical_cols) >= 1 and df[categorical_cols[0]].nunique() < 10:
                # Low-cardinality categorical column -> pie chart.
                fig = px.pie(df, names=categorical_cols[0])
            else:
                # Nothing better applies -> simple line plot.
                fig = px.line(df)

        if fig is None:
            return None

        if dark_mode:
            fig.update_layout(template="plotly_dark")

        return fig

    def process_indicator_query(self, query: str, dark_mode: bool = False):
        """Run the complete indicator-query pipeline from natural language to figure.

        Steps: parse query -> match indicators (falling back to the text2sql
        agent when none match) -> match dimensions -> build filters -> build
        query params -> fetch data -> generate chart code -> render figure.

        Args:
            query: the user's natural-language question.
            dark_mode: forwarded to get_plotly_figure.

        Returns:
            A dict of intermediate artifacts plus 'status'/'message'/'step'
            fields, or the text2sql agent's result when no indicator matches.
        """
        # Built before the try block so the except handler can always reference
        # it (previously a failure before this point raised NameError there).
        result = {
            'parsed_result': None,
            'match_indicators': None,
            'match_dimensions': None,
            'match_filters': None,
            'query_params': None,
            'data': None,
            'plot_code': None,
            'figure': None,
            'status': 'success',
            'message': '',
            'type': 'indicator',
            'step': 0,  # progress tracker
            'step_names': ['解析查询', '匹配指标', '匹配维度', '生成过滤条件', '生成查询参数', '执行查询', '生成图表代码', '创建图表'],
            'step_count': 8
        }

        try:
            # 1. Parse the user query.
            parsed_result = self.parse_user_query(query)
            result['step'] = 1
            if not parsed_result:
                return {**result, 'status': 'error', 'message': '查询解析失败'}
            result['parsed_result'] = parsed_result
            print(f"Parsed result: {json.dumps(parsed_result, ensure_ascii=False, indent=2)}")

            # 2. Match indicators (mandatory step).
            result['step'] = 2
            indicator_desc = parsed_result.get("indicators", [])
            match_result = self.find_matching_indicators(indicator_desc)
            if not match_result or not match_result.get('indicators'):
                # No indicator matched: delegate the whole query to text2sql.
                print("No matching indicators found, falling back to text2sql service...")
                return self.text2sql.process_text2sql_query(query)

            matching_indicators = match_result['indicators']
            themes = match_result['themes']
            print(f"Matching indicators: {json.dumps(matching_indicators, ensure_ascii=False, indent=2)}")

            # 3. Match dimensions (optional step).
            result['step'] = 3
            dimension_desc = parsed_result.get("dimensions", [])
            matching_dimensions = []
            if dimension_desc:
                matching_dimensions = self.find_matching_dimensions(dimension_desc, themes)
                result['match_dimensions'] = matching_dimensions
                print(f"Matching dimensions: {json.dumps(matching_dimensions, ensure_ascii=False, indent=2)}")

            # 4. Generate filter conditions (optional step).
            result['step'] = 4
            filter_conditions = []
            filters = parsed_result.get("filters", [])
            if filters:
                filter_conditions = self.generate_filter_conditions(filters, themes)
                result['match_filters'] = filter_conditions
                print(f"Filter conditions: {json.dumps(filter_conditions, ensure_ascii=False, indent=2)}")

            # 5. Build query parameters.
            result['step'] = 5
            query_params = self.generate_query_json(
                matching_indicators=matching_indicators,
                dimension_ids=matching_dimensions,
                filter_conditions=filter_conditions
            )
            result['query_params'] = query_params
            print(f"Query params: {json.dumps(query_params, ensure_ascii=False, indent=2)}")

            # 6. Execute the data query.
            result['step'] = 6
            query_result = self.query_indicator_data(query_params)
            if query_result is None or query_result.empty:
                return {**result, 'status': 'error', 'message': '查询未返回数据'}

            result['data'] = query_result
            print(f"Query result: {query_result.head(10).to_json(orient='records')}")

            # 7. Generate chart code.
            result['step'] = 7
            plot_code = self.generate_plot_code(
                question=query,
                query_params=query_params,
                df_metadata=f"Running df.dtypes gives:\n {query_result.dtypes}"
            )
            # Bug fix: generate_plot_code always returns a truthy dict, so the
            # old `if not plot_code` error check could never fire; inspect
            # status/code explicitly.
            if not plot_code or plot_code.get('status') != 'success' or not plot_code.get('code'):
                return {**result, 'status': 'error', 'message': '图表代码生成失败'}

            result['plot_code'] = plot_code
            print(f"Plot code: {plot_code}")

            # 8. Render the Plotly figure. Bug fix: pass the extracted source
            # (plot_code['code']) rather than the whole result dict — the old
            # code handed the dict to exec, which always raised and silently
            # fell back to a default chart.
            result['step'] = 8
            figure = self.get_plotly_figure(
                plotly_code=plot_code['code'],
                df=query_result,
                dark_mode=dark_mode
            )
            result['figure'] = figure
            print("Figure generated successfully")

            return result

        except Exception as e:
            import traceback
            error_msg = f"步骤 {result.get('step', 0)} 处理失败: {str(e)}\n{traceback.format_exc()}"
            print("\n=== Error Details ===")
            print(error_msg)
            return {**result, 'status': 'error', 'message': error_msg}

    async def process_indicator_query_stream(self, query: str, dark_mode: bool = False):
        """Streaming variant of process_indicator_query.

        Yields a snapshot of the result dict after every pipeline step so
        callers can render progress; terminates early (after yielding an
        error snapshot) when a mandatory step fails.

        Args:
            query: the user's natural-language question.
            dark_mode: forwarded to get_plotly_figure.

        Yields:
            Result-dict snapshots with 'status', 'step' and 'message' fields.
        """
        result = {
            'parsed_result': None,
            'match_indicators': None,
            'match_dimensions': None,
            'match_filters': None,
            'query_params': None,
            'data': None,
            'plot_code': None,
            'figure': None,
            'status': 'processing',
            'message': '',
            'step': ''
        }

        try:
            # 1. Parse the user query.
            result['step'] = 'parsing_query'
            yield {**result, 'message': '正在解析查询...'}

            parsed_result = self.parse_user_query(query)
            if not parsed_result:
                result['status'] = 'error'
                result['message'] = '查询解析失败'
                yield result
                return

            result['parsed_result'] = parsed_result
            yield {**result, 'message': '查询解析完成'}

            # 2. Match indicators (mandatory step).
            result['step'] = 'matching_indicators'
            yield {**result, 'message': '正在匹配指标...'}

            indicator_desc = parsed_result.get("indicators", [])
            match_result = self.find_matching_indicators(indicator_desc)

            if not match_result or not match_result.get('indicators'):
                result['status'] = 'error'
                result['message'] = '未找到匹配的指标'
                yield result
                return

            matching_indicators = match_result['indicators']
            themes = match_result['themes']
            result['match_indicators'] = match_result
            yield {**result, 'message': '指标匹配完成'}

            # 3. Match dimensions (optional step).
            result['step'] = 'matching_dimensions'
            dimension_desc = parsed_result.get("dimensions", [])
            matching_dimensions = []

            if dimension_desc:
                yield {**result, 'message': '正在匹配维度...'}
                matching_dimensions = self.find_matching_dimensions(dimension_desc, themes)
                result['match_dimensions'] = matching_dimensions
                yield {**result, 'message': '维度匹配完成'}

            # 4. Generate filter conditions (optional step).
            result['step'] = 'generating_filters'
            filter_conditions = []
            filters = parsed_result.get("filters", [])

            if filters:
                yield {**result, 'message': '正在生成过滤条件...'}
                filter_conditions = self.generate_filter_conditions(filters, themes)
                result['match_filters'] = filter_conditions
                yield {**result, 'message': '过滤条件生成完成'}

            # 5. Build query parameters.
            result['step'] = 'generating_query'
            yield {**result, 'message': '正在生成查询参数...'}

            query_params = self.generate_query_json(
                matching_indicators=matching_indicators,
                dimension_ids=matching_dimensions,
                filter_conditions=filter_conditions
            )
            result['query_params'] = query_params
            yield {**result, 'message': '查询参数生成完成'}

            # 6. Execute the data query.
            result['step'] = 'querying_data'
            yield {**result, 'message': '正在查询数据...'}

            query_result = self.query_indicator_data(query_params)
            if query_result is None or query_result.empty:
                result['status'] = 'error'
                result['message'] = '查询未返回数据'
                yield result
                return

            # NOTE: this is a pandas DataFrame; serialize before sending to a
            # JSON transport.
            result['data'] = query_result
            yield {**result, 'message': '数据查询完成'}

            # 7. Generate chart code.
            result['step'] = 'generating_plot'
            yield {**result, 'message': '正在生成图表代码...'}

            plot_code = self.generate_plot_code(
                question=query,
                query_params=query_params,
                df_metadata=f"Running df.dtypes gives:\n {query_result.dtypes}"
            )
            # Bug fix: generate_plot_code always returns a truthy dict, so the
            # old `if not plot_code` check never fired and a None 'code'
            # reached exec downstream, masking the error with a fallback
            # chart. Inspect status/code explicitly.
            if not plot_code or plot_code.get('status') != 'success' or not plot_code.get('code'):
                result['status'] = 'error'
                result['message'] = '图表代码生成失败'
                yield result
                return

            result['plot_code'] = plot_code
            yield {**result, 'message': '图表代码生成完成'}

            # 8. Render the Plotly figure from the extracted source.
            result['step'] = 'generating_figure'
            yield {**result, 'message': '正在生成图表...'}

            figure = self.get_plotly_figure(
                plotly_code=plot_code['code'],
                df=query_result,
                dark_mode=dark_mode
            )
            result['figure'] = figure
            result['status'] = 'success'
            yield {**result, 'message': '图表生成完成'}

        except Exception as e:
            import traceback
            error_msg = f"处理失败: {str(e)}\n{traceback.format_exc()}"
            yield {
                **result,
                'status': 'error',
                'message': error_msg
            }