from flask import jsonify
import json
from typing import List, Dict, Any, Optional
from datetime import datetime
import yaml
import os
from .chroma_service import ChromaService
from .metrics_service import MetricsService
from config import MODEL_NAME, INDICATORS_COLLECTION, DIMENSIONS_COLLECTION, MDR_CONFIG, CODE_TABLES_COLLECTION, SUBJECTS_COLLECTION
import pandas as pd
import re
import plotly
import plotly.express as px
import plotly.graph_objects as go
from .permission_control import apply_indicator_permissions, get_authorized_dimensions_and_filters, compute_filter_intersection
from llm.model_factory import ModelFactory
from utils.json2md import json_to_md_table
import math
from utils.date_transformer import transform_json
from agents.text2sql import Text2SQLAgent

class IndicatorAgentService:
    def __init__(self):
        """Wire up the LLM client, vector store, metrics backend and prompts."""
        # External service clients.
        self.model_client = ModelFactory.create_model()
        self.chroma = ChromaService()
        self.metrics = MetricsService(base_url=MDR_CONFIG['base_url'])
        self.text2sql = Text2SQLAgent()
        # Model and prompt-template configuration.
        self.model_name = MODEL_NAME
        self.prompts = self._load_prompts()
        # Field names used when querying/filtering the vector collections.
        self.subject_meta_name = "subject_area"
        self.dimension_code_name = "dimension_code"
        self.dimension_desc_name = "dimension_description"
        
    def _load_prompts(self) -> Dict[str, Dict[str, str]]:
        """Load the prompt templates from config/prompts.yaml.

        Returns:
            Dict[str, Dict[str, str]]: mapping of prompt name -> role -> text.
            An empty dict is returned for an empty YAML file, so callers can
            index ``self.prompts[...]`` without hitting a ``None``.
        """
        config_path = os.path.join('config', 'prompts.yaml')
        with open(config_path, 'r', encoding='utf-8') as f:
            # yaml.safe_load returns None for an empty document; normalize to {}.
            return yaml.safe_load(f) or {}

    def parse_user_query(self, query: str, history_messages) -> Dict[str, Any]:
        """解析用户查询，提取指标、维度和过滤条件"""
        try:
            current_date = datetime.now().strftime('%Y-%m-%d')
            response = self._call_llm(self.prompts['parse_query']['system'], f"{query} \n当前日期是: {current_date}", history_messages)
            if not response:
                print("Warning: LLM returned empty response")
                return {"indicators": [], "dimensions": [], "filters": []}
                
            parsed_result = self._parse_llm_response(response, {"indicators": [], "dimensions": [], "filters": []})
            if not parsed_result.get('indicators'):
                print("Warning: No indicators identified")
                return {"indicators": [], "dimensions": [], "filters": []}
                
            return parsed_result
            
        except Exception as e:
            print(f"Error parsing user query: {str(e)}")
            return {"indicators": [], "dimensions": [], "filters": []}
        
    def find_matching_subjects(self, query: str, history_messages) -> Dict[str, Any]:
        """Retrieve candidate subjects from the vector store and let the LLM pick one.

        Returns:
            Dict with ``subject`` and ``status``; on failure ``status`` is
            'error' and a ``message`` field describes the problem.
        """
        try:
            # Hybrid (vector + keyword) search over the subjects collection.
            candidates = self.chroma.list_records(
                collection_name=SUBJECTS_COLLECTION,
                embedding_model=None,
                page=1,
                page_size=10,
                search_type='hybrid',
                record_id=None,
                query_text=query,
                metadata_filter=None,
                document_filter=None,
                n_results_requested=10
            )
            if not candidates.get('records'):
                raise ValueError("未找到匹配的主题信息")

            # Ask the LLM to choose the best-matching subject from the table.
            response = self._call_llm(
                self.prompts['match_subjects']['system'],
                self.prompts['match_subjects']['user'].format(
                    query=query,
                    subjects=json_to_md_table(candidates['records'])),
                history_messages)

            return {
                'subject': self._parse_llm_response(response, {}),
                'status': 'success'
            }
        except Exception as e:
            return {
                'subject': {},
                'status': 'error',
                'message': f"查找匹配主题时发生错误: {str(e)}"
            }
    def find_matching_indicators(self, query: str, indicator_desc: List[str], subject: str, history_messages) -> Dict[str, Any]:
        """Find indicator records matching the given descriptions within a subject.

        Each description is searched separately in the indicators collection
        (10-candidate cap overall), then the LLM selects the final matches.

        Returns:
            Dict with ``indicators`` (each guaranteed a ``unit`` key),
            ``themes`` (distinct themes of the matches) and ``status``;
            on failure ``status`` is 'error' with a ``message``.
        """
        try:
            if not indicator_desc:
                raise ValueError("指标描述不能为空")

            # Split the 10-candidate budget evenly across the descriptions.
            desc_count = len(indicator_desc)
            page_size = max(1, 10 // desc_count)
            all_records = []

            # Retrieve candidates for each indicator description separately.
            for desc in indicator_desc:
                results = self.chroma.list_records(
                    collection_name=INDICATORS_COLLECTION,
                    embedding_model=None,
                    page=1,
                    page_size=page_size,
                    search_type='hybrid',
                    record_id=None,
                    query_text=desc,
                    metadata_filter={self.subject_meta_name: subject},
                    document_filter=None,
                    n_results_requested=page_size
                )
                all_records.extend(results.get('records', []))

            # Keep at most 10 candidates after merging.
            all_records = all_records[:10]
            if not all_records:
                raise ValueError("未找到匹配的指标记录")

            # Surface the unit from metadata so the LLM sees it in the table.
            for r in all_records:
                r['unit'] = r['metadata'].get('unit', '')

            system_prompt = self.prompts['match_indicators']['system']
            user_prompt = self.prompts['match_indicators']['user'].format(
                query_indicators=indicator_desc,
                query=query,
                indicators=json_to_md_table(all_records))
            response = self._call_llm(system_prompt, user_prompt, history_messages)

            matched_indicators = self._parse_llm_response(response, [])

            # Single pass: guarantee a 'unit' key and collect distinct themes.
            # (The original defaulted 'unit' redundantly in two separate loops.)
            themes = set()
            for indicator in matched_indicators:
                indicator['unit'] = indicator.get('unit', '')
                if 'theme' in indicator:
                    themes.add(indicator['theme'])

            return {
                'indicators': matched_indicators,
                'themes': list(themes),
                'status': 'success'
            }
        except Exception as e:
            return {
                'indicators': [],
                'themes': [],
                'status': 'error',
                'message': f"查找匹配指标时发生错误: {str(e)}"
            }
        
    def get_avalaible_dimensions(self, dimension_desc: List[str], themes: List[str]) -> List[Dict[str, Any]]:
        """Return dimension records available in every one of the given themes.

        For each theme, each dimension description is searched in the
        dimensions collection (10-candidate cap per theme); only dimensions
        whose code appears under all themes are returned.

        Args:
            dimension_desc: User-requested dimension descriptions.
            themes: Themes whose dimension sets are intersected.

        Returns:
            List of dimension documents common to all themes; [] when
            ``themes`` is empty or on any error.
        """
        try:
            # Nothing to intersect without themes; bail out before any lookups.
            if not themes:
                return []

            # Split the 10-candidate budget evenly across the descriptions.
            desc_count = len(dimension_desc) if dimension_desc else 1
            page_size = max(1, 10 // desc_count)
            theme_dimensions = {}

            for theme in themes:
                theme_dimensions[theme] = {}
                all_records = []
                # Retrieve candidates for each dimension description separately.
                for desc in dimension_desc:
                    results = self.chroma.list_records(
                        collection_name=DIMENSIONS_COLLECTION,
                        embedding_model=None,
                        page=1,
                        page_size=page_size,
                        search_type='hybrid',
                        record_id=None,
                        query_text=desc,
                        metadata_filter={self.subject_meta_name: theme},
                        document_filter=None,
                        n_results_requested=page_size
                    )
                    all_records.extend(results.get('records', []))
                # Keep at most 10 candidates per theme after merging.
                all_records = all_records[:10]
                for idx, record in enumerate(all_records):
                    doc = record.get('document', '{}')
                    if isinstance(doc, str):
                        doc = json.loads(doc)
                    # NaN is not JSON-serializable downstream; replace with None.
                    for key, value in doc.items():
                        if isinstance(value, float) and math.isnan(value):
                            doc[key] = None
                    dimension_code = doc.get(self.dimension_code_name, record.get('id', str(idx)))
                    theme_dimensions[theme][dimension_code] = doc

            # Intersect the dimension codes across all themes.
            common_dimension_codes = set(theme_dimensions[themes[0]].keys())
            for theme in themes[1:]:
                common_dimension_codes &= set(theme_dimensions[theme].keys())

            # Return the full record (taken from the first theme) per common code.
            return [
                theme_dimensions[themes[0]][code]
                for code in common_dimension_codes
            ]
        except Exception as e:
            # Best-effort helper: log instead of swallowing silently; keep [].
            print(f"Error getting available dimensions: {str(e)}")
            return []
        
    def get_avalaible_code_table_values(self, value_desc: List[str]) -> List[Dict[str, Any]]:
        """Search the code-table collection for values matching the descriptions.

        Args:
            value_desc: Value descriptions; joined with spaces into one query.

        Returns:
            List of parsed code-table documents; [] on error. Records whose
            document cannot be parsed as JSON are skipped with a warning.
        """
        try:
            values = []
            results = self.chroma.list_records(
                collection_name=CODE_TABLES_COLLECTION,
                embedding_model=None,
                page=1,
                page_size=10,
                search_type='hybrid',
                record_id=None,
                query_text=" ".join(value_desc),
                metadata_filter=None,
                document_filter=None,
                n_results_requested=10
            )
            for record in results.get('records', []):
                doc = record.get('document', '{}')
                if isinstance(doc, str):
                    try:
                        doc = json.loads(doc)
                    except json.JSONDecodeError:
                        print(f"Warning: Failed to parse document as JSON: {doc}")
                        continue
                code_table_name = doc.get('code_table_name', '未知码表名')
                print(f"找到码表：{code_table_name}")
                values.append(doc)

            return values
        except Exception as e:
            # Best-effort helper: log instead of swallowing silently; keep [].
            print(f"Error getting code table values: {str(e)}")
            return []

    def find_matching_dimensions(self, common_dimensions: List[Dict], dimension_desc: List[str], themes: List[str], history_messages=None) -> List[Dict[str, Any]]:
        """Ask the LLM to pick theme-common dimensions matching the descriptions.

        Args:
            common_dimensions: Dimension records shared by all themes.
            dimension_desc: User-requested dimension descriptions.
            themes: Theme names, shown to the LLM for context.
            history_messages: Optional chat history; new parameter with a None
                default so existing 3-argument callers keep working.

        Returns:
            List of matched dimension records; [] on error.
        """
        try:
            # BUG FIX: the original called _call_llm without the required
            # history_messages argument, so every call raised TypeError and
            # this method always returned [].
            response = self._call_llm(
                self.prompts['match_dimensions']['system'],
                self.prompts['match_dimensions']['user'].format(
                    query_dimensions=json.dumps(dimension_desc, ensure_ascii=False),
                    themes=json.dumps(themes, ensure_ascii=False),
                    common_dimensions=json_to_md_table(common_dimensions)),
                history_messages)

            return self._parse_llm_response(response, [])
        except Exception as e:
            print(f"Error matching dimensions: {str(e)}")
            return []
        
    def find_matching_dimensions_new(self, query_dimensions: List[str], dimensions: List[Dict], history_messages) -> List[Dict[str, Any]]:
        """Ask the LLM to select, from ``dimensions``, the ones matching the requested descriptions.

        Returns:
            List of matched dimension records; [] on any error.
        """
        try:
            system_prompt = self.prompts['match_dimensions_new']['system']
            user_prompt = self.prompts['match_dimensions_new']['user'].format(
                query_dimensions=json.dumps(query_dimensions, ensure_ascii=False),
                dimensions=json_to_md_table(dimensions))
            reply = self._call_llm(system_prompt, user_prompt, history_messages)
            return self._parse_llm_response(reply, [])
        except Exception:
            return []

    def generate_filter_conditions(self, filters: List[Dict], themes: List[str], history_messages=None) -> List[Dict[str, Any]]:
        """
        Generate filter conditions using only filter fields shared by all themes.

        Args:
            filters: User-described filter conditions; each item has a 'field' key.
            themes: Theme names whose common filter fields are used.
            history_messages: Optional chat history; new parameter with a None
                default so existing 2-argument callers keep working.

        Returns:
            List[Dict[str, Any]]: generated filter conditions; [] on error.
        """
        try:
            # No themes means no common fields to filter on; skip all lookups.
            if not themes:
                return []

            filter_desc = [item['field'] for item in filters]
            # Collect candidate filter fields per theme.
            theme_filter_fields = {}
            for theme in themes:
                results = self.chroma.list_records(
                    collection_name=DIMENSIONS_COLLECTION,
                    embedding_model=None,
                    page=1,
                    page_size=10,
                    search_type='hybrid',
                    record_id=None,
                    query_text=" ".join(filter_desc),
                    metadata_filter={self.subject_meta_name: theme},
                    document_filter=None,
                    n_results_requested=10
                )
                theme_filter_fields[theme] = {}
                for idx, record in enumerate(results.get('records', [])):
                    doc = record.get('document', '{}')
                    if isinstance(doc, str):
                        doc = json.loads(doc)
                    field_code = doc.get(self.dimension_code_name, record.get('id', str(idx)))
                    theme_filter_fields[theme][field_code] = {
                        '字段代码': doc.get(self.dimension_code_name),
                        '字段名称': doc.get(self.dimension_desc_name, '')
                    }

            # Intersect the field codes across all themes.
            common_fields_codes = set(theme_filter_fields[themes[0]].keys())
            for theme in themes[1:]:
                common_fields_codes &= set(theme_filter_fields[theme].keys())

            common_filter_fields = [
                theme_filter_fields[themes[0]][code]
                for code in common_fields_codes
            ]

            print(f"Common filter fields: {json.dumps(common_filter_fields, ensure_ascii=False, indent=2)}")

            # BUG FIX: the original omitted the required history_messages
            # argument to _call_llm, so the call raised TypeError and this
            # method always returned [].
            response = self._call_llm(
                self.prompts['generate_filters']['system'],
                self.prompts['generate_filters']['user'].format(
                    query_filters=json.dumps(filters, ensure_ascii=False, indent=2),
                    available_fields=json.dumps(common_filter_fields, ensure_ascii=False, indent=2)),
                history_messages)

            return self._parse_llm_response(response, [])
        except Exception as e:
            print(f"Error generating filter conditions: {str(e)}")
            return []

    def generate_filter_conditions_new(self,
                                       filters: List[Dict],
                                       dimensions: List[Dict],
                                       avalaible_values: List[Dict],
                                       history_messages) -> List[Dict[str, Any]]:
        """
        Generate filter conditions from available dimensions and code-table values.

        Args:
            filters: User-described filter conditions.
            dimensions: Available (e.g. authorized) dimension records.
            avalaible_values: Candidate code-table value records.
            history_messages: Chat history forwarded to the LLM.

        Returns:
            List[Dict[str, Any]]: generated filter conditions; [] on error.
        """
        try:
            user_prompt = self.prompts['generate_filters']['user'].format(
                query_filters=json.dumps(filters, ensure_ascii=False, indent=2),
                available_fields=json_to_md_table(dimensions),
                available_values=json_to_md_table(avalaible_values))
            reply = self._call_llm(self.prompts['generate_filters']['system'],
                                   user_prompt,
                                   history_messages)
            return self._parse_llm_response(reply, [])
        except Exception:
            return []

    def query_indicator_data(self, params: Dict[str, Any]) -> Optional[pd.DataFrame]:
        """
        调用指标查询接口获取数据并返回DataFrame格式
        
        Args:
            params (Dict[str, Any]): 查询参数，包含:
                - startDate: 开始日期
                - endDate: 结束日期
                - kpis: 指标列表
                - dims: 维度列表
                
        Returns:
            Optional[pd.DataFrame]: 查询结果DataFrame，如果查询失败返回None
                
        Example:
            >>> params = {
            ...     "startDate": "2024-02-01",
            ...     "endDate": "2024-02-26",
            ...     "kpis": "revenue as 收入",
            ...     "dims": "date as 日期, dept as 科室"
            ... }
            >>> df = query_indicator_data(params)
        """
        try:
            if not params:
                raise ValueError("查询参数不能为空")
                
            # 验证必要参数
            required_params = ['kpis']
            missing_params = [param for param in required_params if not params.get(param)]
            if missing_params:
                raise ValueError(f"缺少必要参数: {', '.join(missing_params)}")
                
            # 调用指标查询服务获取数据
            result = self.metrics.get_kpis(params)
            
            if result is None or (isinstance(result, (list, dict)) and not result):
                raise ValueError("查询未返回任何数据")
                
            # 将结果转换为DataFrame
            try:
                if isinstance(result, pd.DataFrame):
                    df = result
                elif isinstance(result, dict):
                    df = pd.DataFrame.from_dict(result)
                elif isinstance(result, list):
                    df = pd.DataFrame(result)
                else:
                    raise ValueError(f"不支持的数据格式: {type(result)}")
            except Exception as e:
                raise ValueError(f"数据格式转换失败: {str(e)}")
                
            # 基本数据清理
            df = df.fillna('')  # 填充空值
            
            # 对日期列进行处理
            date_columns = df.select_dtypes(include=['datetime64']).columns
            for col in date_columns:
                df[col] = df[col].dt.strftime('%Y-%m-%d')
                
            return df
            
        except ValueError as e:
            print(f"指标查询错误: {str(e)}")
            return None
        except Exception as e:
            print(f"查询指标数据时发生未知错误: {str(e)}")
            return None

    def generate_chart_code(self, data: Dict[str, Any]) -> str:
        """根据查询结果生成图表代码"""
        
        response = self._call_ollama(self.prompts['generate_chart']['system'], json.dumps(data))
        return response

    def _call_llm(self, system_prompt: str, user_prompt: str, history_messages) -> str:
        """调用模型 - 使用 model_client 替代"""
        return self.model_client.chat(
            messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt}
                ],
            model=self.model_name,
            temperature=0,
            history_messages=history_messages
        )

    def _parse_llm_response(self, response: str, default_value: Any = None) -> Any:
        """
        解析大模型返回的JSON格式响应
        
        Args:
            response (str): 大模型返回的响应字符串
            default_value (Any, optional): 解析失败时的默认返回值
            
        Returns:
            Any: 解析后的Python对象，解析失败则返回default_value
        """
        try:
            if not response:
                raise ValueError("LLM返回空响应")
            # 1. 尝试直接解析
            try:
                return json.loads(response)
            except json.JSONDecodeError:
                pass
                
            # 2. 尝试提取JSON部分
            import re
            json_pattern = r'(\{[\s\S]*\}|\[[\s\S]*\])'  # 匹配包括换行符在内的所有JSON内容
            json_match = re.search(json_pattern, response)
            
            if (json_match):
                try:
                    return json.loads(json_match.group())
                except json.JSONDecodeError:
                    pass
            
            # 3. 清理常见的格式问题
            cleaned_response = response.strip()
            # 移除可能的markdown代码块标记
            if cleaned_response.startswith('```json'):
                cleaned_response = cleaned_response[7:]
            if cleaned_response.endswith('```'):
                cleaned_response = cleaned_response[:-3]
                
            cleaned_response = cleaned_response.strip()
            
            try:
                return json.loads(cleaned_response)
            except json.JSONDecodeError as e:
                print(f"JSON parse error: {str(e)}")
                print(f"Raw response: {response}")
                return default_value
                
        except Exception as e:
            print(f"解析LLM响应时发生错误: {str(e)}")
            print(f"原始响应: {response}")
            return default_value
    # df.head(10).to_json(orient='records')
    def get_kpis(self, params):
        return self.metrics.get_kpis(params)
    
    def generate_query_json(self, matching_indicators: List[Dict], 
                       dimension_ids: List[Dict], 
                       filter_conditions: List[Dict], question: str,
                       history_messages) -> Dict:
        """生成查询JSON"""
        
        # 准备指标和维度信息
        indicators_info = "\n".join([f"- 指标代码: {ind['code']}, 指标名称: {ind['name']}" 
                                    for ind in matching_indicators])
        dimensions_info = "\n".join([f"- 维度代码: {dim['code']}, 维度名称: {dim['name']}, 关联码表：{dim['table']}" 
                                    for dim in dimension_ids])
        filters_info = json.dumps(filter_conditions, ensure_ascii=False)
        
        # 构建完整prompt
        current_date = datetime.now().strftime('%Y-%m-%d')
        context = self.prompts['query_generator']['user'].format(
            current_date=current_date,
            indicators_info=indicators_info,
            dimensions_info=dimensions_info,
            filters_info=filters_info,
            question=question
        )
        
        # 调用大模型
        response = self._call_llm(self.prompts['query_generator']['system'], context, history_messages)
        
        # 解析返回结果
        return self._parse_llm_response(
            response, {}
        )
    
    def generate_plot_code(self, question: str = None, query_params: str = None, df_metadata: str = None) -> Dict[str, Any]:
        try:
            if not df_metadata:
                raise ValueError("缺少数据框架元数据信息")
                
            if question is not None:
                system_msg = f"The following is a pandas DataFrame that contains the results of the query that answers the question the user asked: '{question}'"
            else:
                system_msg = "The following is a pandas DataFrame "

            if query_params is not None:
                system_msg += f"\n\nThe DataFrame was produced using this params: {query_params}\n\n"
            
            system_msg += f"The following is information about the resulting pandas DataFrame 'df': \n{df_metadata}"

            plotly_code = self.model_client.chat(
                messages=[
                    {"role": "system", "content": self.prompts['generate_plot']['system'].format(system_msg=system_msg)},
                    {"role": "user", "content": self.prompts['generate_plot']['user']}
                ],
                model=self.model_name
            )

            if not plotly_code:
                raise ValueError("生成图表代码失败")
                
            sanitized_code = self._sanitize_plotly_code(self._extract_python_code(plotly_code))
            
            return {
                'status': 'success',
                'code': sanitized_code
            }
            
        except Exception as e:
            return {
                'status': 'error',
                'message': f"生成图表代码时发生错误: {str(e)}",
                'code': None
            }
    
    def _extract_python_code(self, markdown_string: str) -> str:
        # Regex pattern to match Python code blocks
        pattern = r"```[\w\s]*python\n([\s\S]*?)```|```([\\s\S]*?)```"

        # Find all matches in the markdown string
        matches = re.findall(pattern, markdown_string, re.IGNORECASE)

        # Extract the Python code from the matches
        python_code = []
        for match in matches:
            python = match[0] if match[0] else match[1]
            python_code.append(python.strip())

        if len(python_code) == 0:
            return markdown_string

        return python_code[0]
    
    def _sanitize_plotly_code(self, raw_plotly_code: str) -> str:
        # Remove the fig.show() statement from the plotly code
        plotly_code = raw_plotly_code.replace("fig.show()", "")

        return plotly_code
    
    def get_plotly_figure(self,
        plotly_code: str, df: pd.DataFrame, dark_mode: bool = True
    ) -> plotly.graph_objs.Figure:
        """
        Get a Plotly figure from a dataframe and Plotly code.

        The code is expected to assign a figure to a variable named ``fig``;
        when executing it fails, a fallback chart is chosen from the shape
        and dtypes of ``df``.

        **Example:**
        ```python
        fig = vn.get_plotly_figure(
            plotly_code="fig = px.bar(df, x='name', y='salary')",
            df=df
        )
        fig.show()
        ```

        Args:
            plotly_code (str): The Plotly code to execute; should define ``fig``.
            df (pd.DataFrame): The dataframe bound to ``df`` inside that code.
            dark_mode (bool): Apply the ``plotly_dark`` template when True.

        Returns:
            plotly.graph_objs.Figure: The Plotly figure, or None when no
            figure could be produced.
        """
        # Names visible to the generated code; 'fig' is read back afterwards.
        ldict = {"df": df, "px": px, "go": go}
        try:
            # SECURITY NOTE(review): exec() runs LLM-generated code with full
            # interpreter access — confirm upstream sanitization is sufficient.
            exec(plotly_code, globals(), ldict)

            fig = ldict.get("fig", None)
        except Exception as e:
            # Fallback: a single-row df is rendered as a KPI indicator figure.
            if len(df) == 1:
                numeric_cols = df.select_dtypes(include=["number"]).columns.tolist()
                if numeric_cols:
                    value = df[numeric_cols[0]].iloc[0]
                    title_text = numeric_cols[0]
                else:
                    # No numeric column: fall back to the first cell/column.
                    value = df.iloc[0, 0]
                    title_text = df.columns[0]
                fig = go.Figure(go.Indicator(
                    mode="number",
                    value=value,
                    title={"text": title_text}
                ))
            else:
                # Inspect data types to pick a sensible default chart.
                numeric_cols = df.select_dtypes(include=["number"]).columns.tolist()
                categorical_cols = df.select_dtypes(
                    include=["object", "category"]
                ).columns.tolist()

                # Decision-making for plot type
                if len(numeric_cols) >= 2:
                    # Use the first two numeric columns for a scatter plot
                    fig = px.scatter(df, x=numeric_cols[0], y=numeric_cols[1])
                elif len(numeric_cols) == 1 and len(categorical_cols) >= 1:
                    # Use a bar plot if there's one numeric and one categorical column
                    fig = px.bar(df, x=categorical_cols[0], y=numeric_cols[0])
                elif len(categorical_cols) >= 1 and df[categorical_cols[0]].nunique() < 10:
                    # Use a pie chart for categorical data with fewer unique values
                    fig = px.pie(df, names=categorical_cols[0])
                else:
                    # Default to a simple line plot if above conditions are not met
                    fig = px.line(df)

        # The generated code may never have assigned 'fig'.
        if fig is None:
            return None

        if dark_mode:
            fig.update_layout(template="plotly_dark")

        return fig
    
    def update_dimensions(self, json_input):
        """
        更新传入的 JSON 数组
        过滤掉 dimension_code 以 "Date" 或 "Time" 结尾的项，
        并追加一项 {"dimension_code": "StatisticDate", "dimension_description": "统计日期"}.
        
        参数:
            json_input: 原始的 JSON 字符串或已经解析成 Python 列表格式的数据.
        
        返回:
            更新后的列表.
        """
        # 如果输入是 JSON 字符串，则解析为列表
        if isinstance(json_input, str):
            try:
                data = json.loads(json_input)
            except json.JSONDecodeError as e:
                print("输入的字符串不是有效的 JSON 格式:", e)
                return json_input
        elif isinstance(json_input, list):
            data = json_input
        else:
            print("输入数据格式不正确，请提供 JSON 字符串或列表格式的数据。")
            return json_input

        # 过滤掉 dimension_code 以 "Date" 或 "Time" 结尾的项
        updated_list = [item for item in data 
                        if not (item.get("dimension_code", "").endswith("Date") or item.get("dimension_code", "").endswith("Time"))]

        # 添加新的项
        updated_list.append({
            "dimension_code": "StatisticDate",
            "dimension_description": "统计日期"
        })

        return updated_list

    def process_indicator_query(self, query: str, user_id: str, dark_mode: bool = False, permission_check: bool = True):
        """处理完整的指标查询流程，并根据参数决定是否进行权限控制"""
        try:
            result = {
                'parsed_result': None,
                'match_indicators': None,
                'match_dimensions': None,
                'match_filters': None,
                'query_params': None,
                'data': None,
                'plot_code': None,
                'figure': None,
                'status': 'success',
                'message': '',
                'type': 'indicator',
                'step': 0,
                'step_names': ['解析查询', '匹配指标', '匹配维度', '生成过滤条件', '生成查询参数', '执行查询', '生成图表代码', '创建图表'],
                'step_count': 8
            }
            # 1. 解析用户查询
            result['step'] = 1
            parsed_result = self.parse_user_query(query)
            if not parsed_result:
                return {**result, 'status': 'error', 'message': '查询解析失败'}
            result['parsed_result'] = parsed_result
            print(f"Parsed result: {parsed_result}")

            # 2. 匹配指标
            result['step'] = 2
            indicator_desc = parsed_result.get("indicators", [])
            match_result = self.find_matching_indicators(query, indicator_desc)
            if not match_result or not match_result.get('indicators'):
                text2sql_result = self.text2sql.process_text2sql_query(query)
                return text2sql_result
            matching_indicators = match_result['indicators']
            themes = match_result['themes']
            print(f"Matching indicators: {matching_indicators}")

            # 权限检查：指标
            if permission_check:
                authorized_indicators, unauthorized_indicators, err_msg = apply_indicator_permissions(user_id, matching_indicators)
                if err_msg:
                    return {**result, 'status': 'error', 'message': err_msg}
                matching_indicators = authorized_indicators
                print(f"Authorized indicators: {matching_indicators}")
            else:
                print("Skipping indicator permissions check")
            result['match_indicators'] = matching_indicators

            # 3. 匹配维度
            result['step'] = 3
            query_dimensions = parsed_result.get("dimensions", [])
            # 通过向量库匹配维度，然后再经过模型挑选
            # dimension_desc = parsed_result.get("dimensions", [])
            # matching_dimensions = []
            # if dimension_desc:
            #     matching_dimensions = self.find_matching_dimensions(dimension_desc, themes)
            #     result['match_dimensions'] = matching_dimensions
            #     print(f"Matching dimensions: {matching_dimensions}")

            # 权限检查：维度及取值范围作为过滤条件
            matching_dimensions = []
            authorized_dimensions = []
            avalaible_dimensions = []
            dim_filters = []
            if permission_check:
                auth_ids = [indicator['auth_id'] for indicator in authorized_indicators]
                authorized_dimensions, dim_filters, err_msg = get_authorized_dimensions_and_filters(auth_ids)
                avalaible_dimensions = authorized_dimensions
                print(f"Authorized dimensions: {authorized_dimensions}")
                print(f"Authorized filters: {dim_filters}")
                if err_msg:
                    return {**result, 'status': 'error', 'message': err_msg}
                if query_dimensions:
                    matching_dimensions = self.find_matching_dimensions_new(query_dimensions, authorized_dimensions)
            else:
                if query_dimensions:
                    avalaible_dimensions = self.get_avalaible_dimensions(query_dimensions, themes)
                    matching_dimensions = self.find_matching_dimensions(avalaible_dimensions, query_dimensions, themes)
                    print(f"Matching dimensions from LLM: {matching_dimensions}")
            result['match_dimensions'] = matching_dimensions
            print(f"Matching dimensions: {matching_dimensions}")

            # 4. 生成过滤条件
            result['step'] = 4
            filter_conditions = []
            filters = parsed_result.get("filters", [])
            llm_filters = []
            if filters:
                llm_filters = self.generate_filter_conditions_new(filters, avalaible_dimensions)
                print(f"Filter conditions generated by LLM: {llm_filters}")
            if dim_filters:
                computed_filters, err = compute_filter_intersection(dim_filters, llm_filters)
                if err:
                    return {**result, 'status': 'error', 'message': err}
                filter_conditions.extend(computed_filters)
            else:
                filter_conditions.extend(llm_filters)
            print(f"Filter conditions: {filter_conditions}")
            result['match_filters'] = filter_conditions

            # 5. 生成查询参数
            result['step'] = 5
            query_params = self.generate_query_json(
                matching_indicators=matching_indicators,
                dimension_ids=matching_dimensions,
                filter_conditions=filter_conditions,
                question=query
            )
            result['query_params'] = query_params
            print(f"Query params: {query_params}")

            # 6. 执行数据查询
            result['step'] = 6
            query_result = self.query_indicator_data(query_params)
            if query_result is None or query_result.empty:
                return {**result, 'status': 'error', 'message': '查询未返回数据'}
            result['data'] = query_result
            print(f"Query result: {query_result.head(10).to_json(orient='records')}")

            # 7. 生成图表代码
            result['step'] = 7
            plot_code = self.generate_plot_code(
                question=query,
                query_params=query_params,
                df_metadata=f"Running df.dtypes gives:\n {query_result.dtypes}"
            )
            if not plot_code:
                return {**result, 'status': 'error', 'message': '图表代码生成失败'}
            result['plot_code'] = plot_code
            print(f"Plot code: {plot_code}")

            # 8. 生成Plotly图表
            result['step'] = 8
            figure = self.get_plotly_figure(
                plotly_code=plot_code['code'],
                df=query_result,
                dark_mode=dark_mode
            )
            result['figure'] = figure
            print("Figure generated successfully")

            return result

        except Exception as e:
            import traceback
            error_msg = f"步骤 {result.get('step', 0)} 处理失败: {str(e)}\n{traceback.format_exc()}"
            print("\n=== Error Details ===")
            print(error_msg)
            return {**result, 'status': 'error', 'message': error_msg}

    async def process_indicator_query_stream(self, query: str, user_id: str, dark_mode: bool = False, permission_check: bool = True, history_messages: Optional[List[Dict[str, Any]]] = None):
        """Run the indicator-query pipeline and stream progress as dict events.

        Executes up to 9 steps (parse query, match subject, match indicators,
        match dimensions, build filters, build query params, run query, generate
        plot code, render figure), yielding a snapshot of ``result`` after each
        step so the caller can stream progress to the UI.  When subject or
        indicator matching fails, the request falls back to the Text2SQL agent
        and its stream is forwarded instead.

        Args:
            query: Natural-language user question.
            user_id: Id used for permission checks on indicators/dimensions.
            dark_mode: Forwarded to the Text2SQL fallback (plot rendering is
                currently disabled in this streaming path).
            permission_check: When True, restrict indicators/dimensions/filters
                to what the user is authorized to see.
            history_messages: Prior chat turns passed to the LLM helpers.

        Yields:
            Dict snapshots containing pipeline state plus ``message``/``think``
            progress text; terminal events carry ``status`` 'success'/'error'.
        """
        # Normalize here instead of using a mutable default argument, which
        # would be shared across calls.
        if history_messages is None:
            history_messages = []
        result = {
            'parsed_result': None,
            'match_indicators': None,
            'match_dimensions': None,
            'match_filters': None,
            'query_params': None,
            'data': None,
            'plot_code': None,
            'figure': None,
            'status': 'processing',
            'message': '',
            'think': '',
            'type': 'indicator',
            'step': 0,
            'step_names': ['解析查询', '匹配主题', '匹配指标', '匹配维度', '生成过滤条件', '生成查询参数', '执行查询', '生成图表代码', '创建图表'],
            'step_count': 9
        }
        try:
            # Step 1: parse the user query into indicators/dimensions/filters.
            result['step'] = 1
            yield {**result, 'message': '正在解析查询...', 'think': '正在分析用户的查询意图，提取指标、维度和过滤条件...\n', 'split': True}
            parsed_result = self.parse_user_query(query, history_messages)
            if not parsed_result:
                result['status'] = 'error'
                result['message'] = '查询解析失败'
                result['think'] = '无法理解用户的查询意图'
                yield result
                return
            result['parsed_result'] = parsed_result
            yield {**result, 'message': '查询解析完成', 'think': f"从查询中识别出 {len(parsed_result.get('indicators', []))} 个指标、{len(parsed_result.get('dimensions', []))} 个维度和 {len(parsed_result.get('filters', []))} 个过滤条件\n", 'split': True}
            print(f"Parsed result: {parsed_result}")

            # Step 2: match a subject area; on failure fall back to Text2SQL.
            result['step'] = 2
            yield {**result, 'message': '正在匹配主题...', 'think': '正在从主题库中查找最匹配的主题...\n', 'split': True}
            match_subjects_result = self.find_matching_subjects(query, history_messages)
            if not match_subjects_result or not match_subjects_result.get('subject'):
                yield {**result, 'message': '主题匹配失败', 'think': '无法找到匹配的主题', 'split': True}
                async for item in self.text2sql.text2sql_stream(query, user_id, dark_mode, permission_check, history_messages):
                    yield item
                return
            matched_subject = match_subjects_result.get('subject', {})
            result['matched_subject'] = matched_subject
            yield {**result, 'message': '主题匹配完成', 'think': f"从主题库中找到最匹配的主题：{matched_subject.get('subject_area')}\n", 'split': True}
            print(f"Matching subject: {matched_subject}")

            # Step 3: match indicators; on failure fall back to Text2SQL.
            result['step'] = 3
            yield {**result, 'message': '正在匹配指标...', 'think': '正在从指标库中查找最匹配的指标...\n', 'split': True}
            indicator_desc = parsed_result.get("indicators", [])
            subject_area = matched_subject.get(self.subject_meta_name)
            match_result = self.find_matching_indicators(query, indicator_desc, subject_area, history_messages)
            if not match_result or not match_result.get('indicators'):
                yield {**result, 'message': '指标匹配失败', 'think': '无法找到匹配的指标', 'split': True}
                async for item in self.text2sql.text2sql_stream(query, user_id, dark_mode, permission_check, history_messages):
                    yield item
                return
            matching_indicators = match_result['indicators']
            themes = match_result['themes']
            print(f"Matching indicators: {matching_indicators}")

            # Permission check: restrict to indicators the user may access.
            if permission_check:
                authorized_indicators, unauthorized_indicators, err_msg = apply_indicator_permissions(user_id, matching_indicators)
                if err_msg:
                    result['status'] = 'error'
                    result['message'] = err_msg
                    result['think'] = '用户没有访问所需指标的权限'
                    yield result
                    return
                matching_indicators = authorized_indicators
                print(f"Authorized indicators: {authorized_indicators}")
            result['match_indicators'] = matching_indicators
            yield {**result, 'message': '指标匹配完成', 'think': f"成功匹配到 {len(matching_indicators)} 个符合条件的指标\n", 'split': True}

            # Step 4: match dimensions (permission-restricted when enabled).
            result['step'] = 4
            yield {**result, 'message': '正在匹配维度...', 'think': '正在查找相关维度信息...\n', 'split': True}
            query_dimensions = parsed_result.get("dimensions", [])
            matching_dimensions = []
            authorized_dimensions = []
            avalaible_dimensions = []
            dim_filters = []

            if permission_check:
                auth_ids = [indicator['auth_id'] for indicator in authorized_indicators]
                authorized_dimensions, dim_filters, err_msg = get_authorized_dimensions_and_filters(auth_ids)
                avalaible_dimensions = authorized_dimensions
                if err_msg:
                    result['status'] = 'error'
                    result['message'] = err_msg
                    result['think'] = '获取授权维度失败'
                    yield result
                    return
                if query_dimensions:
                    matching_dimensions = self.find_matching_dimensions_new(query_dimensions, authorized_dimensions, history_messages)
            else:
                if query_dimensions:
                    avalaible_dimensions = self.get_avalaible_dimensions(query_dimensions, themes)
                    matching_dimensions = self.find_matching_dimensions_new(query_dimensions, avalaible_dimensions, history_messages)
            # NOTE: 'avalaible_dimensions' is a misspelled but established key
            # that downstream consumers read; keep it as-is.
            result['avalaible_dimensions'] = self.update_dimensions(avalaible_dimensions)
            # Assign before logging; the original logged the stale None value.
            result['match_dimensions'] = matching_dimensions
            print(f"Matching dimensions: {matching_dimensions}")
            yield {**result, 'message': '维度匹配完成', 'think': f"找到 {len(matching_dimensions)} 个相关维度\n", 'split': True}

            # Step 5: build filter conditions (LLM filters intersected with
            # permission-mandated dimension filters when present).
            result['step'] = 5
            yield {**result, 'message': '正在生成过滤条件...', 'think': '正在处理用户指定的过滤条件...\n', 'split': True}
            filter_conditions = []
            filters = parsed_result.get("filters", [])
            llm_filters = []
            avalaible_filter_dimensions = []
            if filters:
                if permission_check:
                    avalaible_filter_dimensions = authorized_dimensions
                else:
                    filter_dimensions = [item["field"] for item in filters]
                    avalaible_filter_dimensions = self.get_avalaible_dimensions(filter_dimensions, themes)
                filter_values = [item["value"] for item in filters]
                avalaible_values = self.get_avalaible_code_table_values(filter_values)
                llm_filters = self.generate_filter_conditions_new(filters, avalaible_filter_dimensions, avalaible_values, history_messages)
                print(f"Filter values from llm: {llm_filters}")
            if dim_filters:
                computed_filters, err = compute_filter_intersection(dim_filters, llm_filters)
                if err:
                    result['status'] = 'error'
                    result['message'] = err
                    result['think'] = '用户没有访问所需维度的权限'
                    yield result
                    return
                filter_conditions.extend(computed_filters)
            else:
                filter_conditions.extend(llm_filters)
            result['match_filters'] = filter_conditions
            yield {**result, 'message': '过滤条件生成完成', 'think': f"生成了 {len(filter_conditions)} 个过滤条件\n", 'split': True}
            print(f"Filter conditions: {filter_conditions}")

            # Step 6: assemble the query payload and normalize its dates.
            result['step'] = 6
            yield {**result, 'message': '正在生成查询参数...', 'think': '正在构建数据查询参数...\n', 'split': True}
            query_params = transform_json(self.generate_query_json(
                matching_indicators=matching_indicators,
                dimension_ids=matching_dimensions,
                filter_conditions=filter_conditions,
                question=query,
                history_messages=history_messages
            ))
            result['query_params'] = query_params
            yield {**result, 'message': '查询参数生成完成', 'think': f'查询参数构建成功: {query_params}，准备执行查询\n', 'split': True}
            print(f"Query parameters: {query_params}")

            # Step 7: execute the data query (returns a pandas DataFrame).
            result['step'] = 7
            yield {**result, 'message': '正在查询数据...', 'think': '正在从数据库获取数据...\n', 'split': True}
            query_result = self.query_indicator_data(query_params)
            if query_result is None or query_result.empty:
                result['status'] = 'error'
                result['message'] = '查询未返回数据'
                result['think'] = '数据查询结果为空'
                yield result
                return
            result['data'] = query_result
            yield {**result, 'message': '数据查询完成', 'think': f"成功获取 {len(query_result)} 条数据记录\n", 'split': True}
            print(f"Query result: {query_result.head(10).to_json(orient='records')}")

            # Step 8: plot-code generation is currently disabled in the
            # streaming path (see generate_plot_code in the sync variant);
            # only the progress events are emitted.
            result['step'] = 8
            yield {**result, 'message': '正在生成图表代码...', 'think': '正在分析数据特征，选择合适的图表类型...\n', 'split': True}
            yield {**result, 'message': '图表代码生成完成', 'think': '已生成最适合的可视化图表代码\n', 'split': True}

            # Step 9: figure rendering is likewise disabled here; emit the
            # final success events (one with the 'split' marker, one without
            # to terminate the stream segment).
            result['step'] = 9
            yield {**result, 'message': '正在生成图表...', 'think': '正在渲染可视化图表...\n', 'split': True}
            result['status'] = 'success'
            yield {**result, 'message': '图表生成完成', 'think': '数据可视化完成，图表已准备就绪', 'split': True}
            yield {**result, 'message': '图表生成完成', 'think': '数据可视化完成，图表已准备就绪'}

        except Exception as e:
            import traceback
            error_msg = f"步骤 {result.get('step', 0)} 处理失败: {str(e)}\n{traceback.format_exc()}"
            yield {**result, 'status': 'error', 'message': error_msg, 'think': '处理过程中发生错误'}