import os
import re
import time
import json
from collections import Counter
# from transformers import AutoTokenizer, AutoModel, pipeline # local call llm
from core.llm import SiliconFlowLLM  # api call llm
from core.document_processor import DocumentProcessor
from core.knowledge_base import KnowledgeBase
from core.database import DataBase

# Modules for the additional features (few-shot prompting, data analysis, visualization)
from langchain.prompts import PromptTemplate
from langchain.chains import create_sql_query_chain
import pandas as pd
import matplotlib.pyplot as plt
# import seaborn as sns
import io
import base64
from datetime import datetime


# Utility functions carried over from main.py
def extract_sql_from_markdown(markdown_text):
    """Extract the SQL statement from a Markdown ```sql fenced block.

    Falls back to returning the stripped input when no fenced block is
    found, on the assumption that the text is already bare SQL.
    """
    fence = re.search(r"```sql\s*(.*?)\s*```", markdown_text, re.DOTALL)
    return fence.group(1).strip() if fence else markdown_text.strip()


def convert_sql_result_to_dataframe(sql_result):
    """Build a pandas DataFrame from a SQL result shaped as a list of dict rows.

    Returns None for anything else (empty list, non-list input, or rows that
    are not dicts), signalling the caller that the result is not tabular.
    """
    rows_are_dicts = (
        isinstance(sql_result, list)
        and len(sql_result) > 0
        and isinstance(sql_result[0], dict)
    )
    return pd.DataFrame(sql_result) if rows_are_dicts else None


def analyze_data_for_visualization(df, sql, question):
    """Inspect a result DataFrame, the SQL text and the user's question and
    recommend a chart type.

    Args:
        df: pandas DataFrame of the query result (may be None or empty).
        sql: the executed SQL text (used to detect ranking/TOP-N queries).
        question: the user's original natural-language question.

    Returns:
        dict with at least a "chart_type" key; depending on the chart type,
        extra keys name the columns to plot.
    """
    if df is None or len(df) == 0:
        return {"chart_type": "text", "message": "No data available for visualization"}

    # Basic shape of the result set
    num_rows = len(df)
    num_cols = len(df.columns)

    # Split columns into numeric and categorical for axis selection
    numeric_cols = df.select_dtypes(include=['number']).columns.tolist()
    categorical_cols = df.select_dtypes(include=['object']).columns.tolist()

    # A single row is better rendered as text than as a chart
    if num_rows <= 1:
        return {"chart_type": "text", "message": "Single row data is best presented as text"}

    # Detect a time-series column by name (English and Chinese keywords)
    time_series = False
    time_col = None
    for col in df.columns:
        if 'time' in col.lower() or 'date' in col.lower() or '日期' in col.lower() or '时间' in col.lower():
            time_series = True
            time_col = col
            break

    # Detect ranking / TOP-N queries (ORDER BY plus a row limiter)
    ranking = False
    if re.search(r'order\s+by', sql, re.IGNORECASE) and re.search(r'(top|limit|rownum|\<\=\s*\d+)', sql, re.IGNORECASE):
        ranking = True

    # Detect ratio/percentage columns by name
    percentage = False
    for col in df.columns:
        if '%' in col or '比例' in col or '占比' in col:
            percentage = True
            break

    # Intent hints from the question itself (English and Chinese keywords).
    # Fixed: the distribution check used to test '分布' twice.
    has_comparison = '比较' in question or 'compare' in question.lower() or '对比' in question
    has_trend = '趋势' in question or 'trend' in question.lower() or '变化' in question
    has_distribution = '分布' in question or 'distribution' in question.lower()
    has_composition = '组成' in question or 'composition' in question.lower() or '构成' in question

    # Chart-type decision cascade (first match wins)
    if time_series and has_trend and len(numeric_cols) >= 1:
        return {"chart_type": "line", "x_column": time_col, "y_columns": numeric_cols}

    elif ranking and len(numeric_cols) >= 1:
        return {"chart_type": "bar", "x_column": categorical_cols[0] if categorical_cols else df.columns[0],
                "y_column": numeric_cols[0]}

    elif percentage or has_composition:
        # Guard against single-column frames: fall back to the first column
        # instead of indexing df.columns[1], which raised IndexError before.
        fallback_value = df.columns[1] if len(df.columns) > 1 else df.columns[0]
        return {"chart_type": "pie", "label_column": categorical_cols[0] if categorical_cols else df.columns[0],
                "value_column": numeric_cols[0] if numeric_cols else fallback_value}

    elif has_comparison and len(numeric_cols) >= 1 and len(categorical_cols) >= 1:
        return {"chart_type": "bar", "x_column": categorical_cols[0], "y_column": numeric_cols[0]}

    elif has_distribution and len(numeric_cols) >= 1:
        return {"chart_type": "histogram", "column": numeric_cols[0]}

    elif num_cols == 2 and len(numeric_cols) >= 1 and len(categorical_cols) >= 1:
        return {"chart_type": "bar", "x_column": categorical_cols[0], "y_column": numeric_cols[0]}

    elif num_cols == 2 and len(numeric_cols) == 2:
        return {"chart_type": "scatter", "x_column": numeric_cols[0], "y_column": numeric_cols[1]}

    elif len(categorical_cols) >= 2:
        return {"chart_type": "heatmap"}

    else:
        # Default: tabular display
        return {"chart_type": "table"}


def visualize_sql_result(sql_result, query, question):
    """
    Generate a chart image from a SQL query result.

    Args:
        sql_result: SQL query result (expected: list of dict rows)
        query: the executed SQL query text
        question: the user's original question

    Returns:
        Path of the generated chart file, or None when no chart is produced.
    """
    # Convert the SQL result to a DataFrame; bail out when not tabular
    df = convert_sql_result_to_dataframe(sql_result)
    if df is None or len(df) == 0:
        return None

    # Ask the analyzer which chart type fits the data and the question
    viz_recommendation = analyze_data_for_visualization(df, query, question)
    chart_type = viz_recommendation["chart_type"]

    # Text/table recommendations need no image
    if chart_type in ["text", "table"]:
        return None

    # Only these chart types are implemented below. Previously an
    # unsupported type (e.g. "heatmap") fell through every branch and
    # still saved a blank figure.
    if chart_type not in ("line", "bar", "pie", "histogram", "scatter"):
        return None

    plt.figure(figsize=(10, 6))

    try:
        if chart_type == "line":
            x_col = viz_recommendation.get("x_column")
            y_cols = viz_recommendation.get("y_columns", [])
            if not y_cols:
                y_cols = [col for col in df.columns if col != x_col]

            for y_col in y_cols:
                plt.plot(df[x_col], df[y_col], marker='o', label=y_col)

            plt.xlabel(x_col)
            plt.ylabel("Value")
            plt.title(f"{question} - 趋势图")
            plt.legend()
            plt.grid(True)

        elif chart_type == "bar":
            x_col = viz_recommendation.get("x_column")
            y_col = viz_recommendation.get("y_column")

            if len(df) > 15:  # Long result sets: show only the first 15 rows
                df = df.iloc[:15]
                title_suffix = "（仅显示前15条记录）"
            else:
                title_suffix = ""

            plt.bar(df[x_col], df[y_col])
            plt.xlabel(x_col)
            plt.ylabel(y_col)
            plt.title(f"{question} - 柱状图{title_suffix}")
            plt.xticks(rotation=45)
            plt.tight_layout()

        elif chart_type == "pie":
            label_col = viz_recommendation.get("label_column")
            value_col = viz_recommendation.get("value_column")

            if len(df) > 8:  # Many categories: keep the 7 largest plus "其他"
                # Sort first so the remainder really is everything outside the
                # top 7; slicing the unsorted frame (the old behavior) summed
                # the wrong rows into the "others" bucket.
                df = df.sort_values(value_col, ascending=False)
                top_n = df.iloc[:7]
                others_sum = df.iloc[7:][value_col].sum()
                df = pd.concat([top_n, pd.DataFrame({label_col: ['其他'], value_col: [others_sum]})])

            plt.pie(df[value_col], labels=df[label_col], autopct='%1.1f%%')
            plt.axis('equal')
            plt.title(f"{question} - 饼图")

        elif chart_type == "histogram":
            col = viz_recommendation.get("column")

            plt.hist(df[col], bins=10, alpha=0.7)
            plt.xlabel(col)
            plt.ylabel("频率")
            plt.title(f"{question} - 直方图")
            plt.grid(True)

        elif chart_type == "scatter":
            x_col = viz_recommendation.get("x_column")
            y_col = viz_recommendation.get("y_column")

            plt.scatter(df[x_col], df[y_col])
            plt.xlabel(x_col)
            plt.ylabel(y_col)
            plt.title(f"{question} - 散点图")
            plt.grid(True)

        # Timestamped output file name
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        output_file = f"visualization_{timestamp}.png"

        plt.savefig(output_file, dpi=300, bbox_inches='tight')
        return output_file
    except Exception:
        # Visualization is best-effort: a plotting error (e.g. non-numeric
        # values in a numeric axis) must not break the main query flow.
        return None
    finally:
        # Always release the figure, including on the error path
        plt.close()


# Lightweight local FewShotPromptTemplate implementation (LangChain-compatible shape)
class FewShotPromptTemplate:
    """Minimal few-shot prompt builder.

    Renders the formatted prefix, then each formatted example, then the
    formatted suffix, separated by blank lines. Only keys listed in
    ``input_variables`` are forwarded to the prefix/suffix templates.
    """

    def __init__(self, examples, example_prompt, prefix, suffix, input_variables):
        self.examples = examples
        self.example_prompt = example_prompt
        self.prefix = prefix
        self.suffix = suffix
        self.input_variables = input_variables

    def format(self, **kwargs):
        # Forward only the declared input variables to prefix/suffix
        template_args = {name: value for name, value in kwargs.items()
                         if name in self.input_variables}

        # Render every few-shot example with the example template
        rendered_examples = [self.example_prompt.format(**example)
                             for example in self.examples]

        # Assemble: prefix, examples and suffix separated by blank lines
        return (self.prefix.format(**template_args)
                + "\n\n"
                + "\n\n".join(rendered_examples)
                + "\n\n"
                + self.suffix.format(**template_args))


class NL2SQLChatbot:
    """Natural-language to SQL chatbot.

    Pipeline: retrieve context from a document knowledge base (RAG), build a
    few-shot prompt from the database schema, have the LLM generate an Oracle
    SQL query, validate and execute it, then optionally visualize and analyze
    the result.
    """

    def __init__(self, documents_dir: str = "../data", embedding_dim: int = 384):
        """
        Initialize the NL2SQL Chatbot with document processing and database capabilities

        Args:
            documents_dir: Directory containing knowledge documents
            embedding_dim: Dimension of embedding vectors
        """
        # Initialize the LLM
        self.model_engine = SiliconFlowLLM()

        # Database connection
        self.db = DataBase()

        # Load schema info
        self.schema_info = self.db.get_schema_as_dict()

        # Document processing
        self.doc_processor = DocumentProcessor(embedding_dim=embedding_dim)
        self.knowledge_base = KnowledgeBase(embedding_dim=embedding_dim)

        # Try to load cached knowledge base
        start_time = time.time()
        load_result = self.knowledge_base.load()
        print("knowledge_base load_result: ", load_result)

        if "No cache" in load_result:
            # Process documents if no cache
            self._process_documents(documents_dir)
            # Save knowledge base for future use
            self.knowledge_base.save()
        else:
            print(f"Loaded knowledge base in {time.time() - start_time:.2f}s")

        # Conversation history & summary
        self.conversation_history = []

        # Query optimization: frequency of terms seen in user queries
        self.common_terms = Counter()

        # Few-shot examples for SQL generation
        self.setup_few_shot_examples()

    def setup_few_shot_examples(self):
        """Set up the few-shot examples and prompt templates for SQL generation."""
        self.examples = [
            {
                "input": "查询名称中包含'船闸'的所有卡口基本信息，按名称排序。",
                "query": "SELECT ID, NAME, UNIT_ID, DES, PHOTOHTTP FROM BAYONET_BASICS.SYS_BAYONET WHERE NAME LIKE '%船闸%' ORDER BY NAME",
            },
            {
                "input": "统计一月份各卡口点的船舶通过数量，按照通过船舶数量降序排列，只显示通过量前10的卡口。",
                "query": "SELECT * FROM (SELECT b.NAME AS 卡口名称, COUNT(d.CODE) AS 通过船舶数量 FROM BAYONET_BASICS.SYS_BAYONET b JOIN BAYONET_DYNAMIC.DATAFUSION d ON b.ID = d.BAYONET_ID WHERE d.PASSTIME >= TRUNC(SYSDATE, 'YYYY') AND d.PASSTIME < ADD_MONTHS(TRUNC(SYSDATE, 'YYYY'), 1) GROUP BY b.NAME ORDER BY 通过船舶数量 DESC) WHERE ROWNUM <= 10",
            },
            {
                "input": "查询系统中各类型预警的发生次数和占比，按发生次数从高到低排序。",
                "query": "WITH warning_counts AS (SELECT t.NAME AS 预警类型, COUNT(w.CODE) AS 预警次数 FROM BAYONET_BUSSINESS.ZD_WARNING_TYPE t JOIN BAYONET_BUSSINESS.BOAT_WARNING w ON t.ID = w.WARNING_TYPE GROUP BY t.NAME) SELECT 预警类型, 预警次数, ROUND(预警次数 * 100 / SUM(预警次数) OVER(), 2) || '%' AS 占比 FROM warning_counts ORDER BY 预警次数 DESC"
            },
            {
                "input": "分析一下2025年1月油墩港(东大盈船闸)的流量组成。",
                "query": "SELECT COUNT(da.CODE) AS TOTAL_FLOW, SUM(CASE WHEN da.DIRECTION = 1 THEN 1 ELSE 0 END) AS UPSTREAM_FLOW, SUM(CASE WHEN da.DIRECTION = 2 THEN 1 ELSE 0 END) AS DOWNSTREAM_FLOW FROM BAYONET_DYNAMIC.DATAFUSION da LEFT JOIN BAYONET_BASICS.SYS_BAYONET sb ON da.BAYONET_ID = sb.ID WHERE sb.NAME = '油墩港(东大盈船闸)' AND da.PASSTIME >= TO_DATE('2025-01-01 00:00:00', 'YYYY-MM-DD HH24:MI:SS') AND da.PASSTIME <= TO_DATE('2025-01-31 23:59:59', 'YYYY-MM-DD HH24:MI:SS')"
            },
            {
                "input": "查询油墩港(东大盈船闸)2025年 1月份各类预警的案件总数与占比",
                "query": "WITH filtered_data AS (SELECT zwt.NAME AS WARNING_TYPE_NAME FROM BAYONET_DYNAMIC.DATAFUSION df JOIN BAYONET_BUSSINESS.BOAT_WARNING bw ON df.CODE = bw.EVENTCODE JOIN BAYONET_BUSSINESS.ZD_WARNING_TYPE zwt ON bw.WARNING_TYPE = zwt.ID JOIN BAYONET_BASICS.SYS_BAYONET sb ON df.BAYONET_ID = sb.ID WHERE sb.NAME = '油墩港(东大盈船闸)' AND df.PASSTIME >= DATE '2025-01-01' AND df.PASSTIME < DATE '2025-02-01') SELECT WARNING_TYPE_NAME, COUNT(*) AS record_count, ROUND(COUNT(*) * 100.0 / SUM(COUNT(*)) OVER(), 2) AS percentage FROM filtered_data GROUP BY WARNING_TYPE_NAME ORDER BY WARNING_TYPE_NAME"
            }
        ]

        # Few-shot prompt template (uses the local FewShotPromptTemplate class)
        self.example_prompt = PromptTemplate.from_template("输入问题: {input}\n生成SQL: {query}")
        self.few_shot_prompt = FewShotPromptTemplate(
            examples=self.examples,
            example_prompt=self.example_prompt,
            prefix="你是一个Oracle SQL专家。请根据以下表结构生成对应问题的Oracle SQL语句，Oracle版本较低(11g及以下)，且尽量符合标准语法,不考虑ISDELETE和ISFINISH字段，如果没有强调年份，默认为2025年。\n\n以下是表结构（table_info）：\n{table_info}\n\n这是一些问题及其对应的SQL样例:",
            suffix="\n---\n问题: {input}\n对应SQL: ",
            input_variables=["input", "table_info"],
        )

        # Plain (non-few-shot) SQL prompt template
        self.sql_prompt = PromptTemplate.from_template(
            """你是一个SQL专家。根据以下表结构：  
            {table_info}  

            问题：{input}  
            请严格按以下规则响应：  
            1. 只输出SQL代码  
            2. 使用标准Oracle SQL语法  
            3. 不要包含分号结尾  
            4. 最多返回{top_k}条记录  
            5. Oracle中使用ROWNUM来限制返回记录数  

            SQL查询："""
        )

    def _process_documents(self, documents_dir):
        """Process all documents in the specified directory"""
        if not os.path.exists(documents_dir):
            print(f"Documents directory not found: {documents_dir}")
            return

        for filename in os.listdir(documents_dir):
            file_path = os.path.join(documents_dir, filename)
            if os.path.isfile(file_path):
                # Fixed: the f-string had no placeholder and printed a literal
                print(f"Processing {filename}...")
                result = self.knowledge_base.add_document(file_path, self.doc_processor)
                print(result)

    def _update_query_statistics(self, query):
        """Update statistics about common query terms for optimization"""
        # Simple tokenization
        terms = re.findall(r'\b\w+\b', query.lower())
        self.common_terms.update(terms)

    def _retrieve_relevant_knowledge(self, query):
        """Retrieve relevant information from knowledge base"""
        # Track query patterns to improve future retrievals
        self._update_query_statistics(query)

        # Use MMR for diverse results
        results = self.knowledge_base.retrieve_with_mmr(query, self.doc_processor, top_k=5, diversity=0.35)

        # results = self.knowledge_base.retrieve_relevant(query, self.doc_processor, top_k=3)

        if not results:
            return "No relevant information found."

        # Group results by source to improve context coherence
        source_groups = {}
        for result in results:
            source = result['metadata']['source']
            if source not in source_groups:
                source_groups[source] = []
            source_groups[source].append(result)

        relevant_text = ""

        # Add results, grouped by source
        for source, items in source_groups.items():
            relevant_text += f"[Document: {source}]\n"

            # Sort items by their original index to maintain document flow
            items.sort(key=lambda x: x['metadata']['index'])

            for item in items:
                relevant_text += f"{item['text']}\n\n"

        return relevant_text

    def _prepare_prompt(self, user_query):
        """Prepare a prompt for the language model"""
        # Retrieve relevant knowledge
        start_time = time.time()
        relevant_knowledge = self._retrieve_relevant_knowledge(user_query)
        retrieval_time = time.time() - start_time
        print(f"Knowledge retrieval time: {retrieval_time:.2f}s")

        # Format conversation history
        history_text = ""

        for turn in self.conversation_history[-3:]:  # Last 3 turns for context
            user_text = turn.get("user", "Unknown User Input")
            system_text = turn.get("system", "No System Response")
            history_text += f"User: {user_text}\nSystem: {system_text}\n"

        # Detect if this is a follow-up question
        is_followup = self._detect_followup_question(user_query)

        # Render the few-shot examples section
        few_shot_sql = self.few_shot_prompt.format(input=user_query, table_info=self.schema_info)

        # Format the prompt (user-facing text is intentionally Chinese)
        prompt = f"""
        基于数据table schema：{self.schema_info}

        和知识库相关内容：{relevant_knowledge}

        以及历史会话记录：{history_text}
        {'这是基于上一轮对话的一个follow-up。' if is_followup else ''}

        以下是few-shot示例的分析结果，用于参考：
        {few_shot_sql}

        生成合法的SQL SELECT语句，要求：
        1. 根据已知信息判断，是否需要引导用户提问或者提问用户获取信息
        2. 如果有足够信息，生成合法的SQL SELECT语句
        3. 返回查询结果
        4. 使用标准Oracle SQL语法，对于结果限制使用ROWNUM
        5. 如果需要进行分页或限制结果数量，使用"WHERE ROWNUM <= N"语法

        如果模糊判断需要更多信息, respond with: "请提供具体信息: [specific question]"
        如果用户需要引导，respond with:"引导提问: [specific instruction]"
        如果已经有足够信息来生成sql, respond with: "SQL_QUERY: [SQL_QUERY]"
        """
        return prompt

    def _detect_followup_question(self, query):
        """Detect if the current question is a follow-up to previous ones using LLM."""

        # Construct conversation context from the last few exchanges
        conversation_context = ""
        for turn in self.conversation_history[-3:]:  # Last 3 turns for context
            user_text = turn.get("user", "Unknown User Input")
            system_text = turn.get("system", "No System Response")
            conversation_context += f"User: {user_text}\nSystem: {system_text}\n"

        # Construct prompt to ask LLM
        prompt = f"""
        Given the conversation history below:

        {conversation_context}

        User's current query: {query}

        Is the user's current query a follow-up to the previous questions? Respond with 'yes' or 'no'.
        """

        # Call the LLM to evaluate whether the current query is a follow-up question
        followup_response = self.model_engine.call_helper(query=query, prompt=prompt)

        # Accept answers like "Yes" / "yes." instead of requiring an exact match
        return followup_response.strip().lower().startswith('yes')

    def _handle_exploration_query(self, user_query):
        """
        Handle queries about available data and variables

        Returns:
            Detailed information about database schema and available data
        """
        relevant_knowledge = self._retrieve_relevant_knowledge(user_query)

        # Prepare prompt for data exploration
        exploration_prompt = f"""Database Schema Overview
        Database Schema: {self.schema_info}
        Relevant Knowledge: {relevant_knowledge}

        Answer user's question by analyzing relevant Database schema and relevant knowledge.
        Provide a clear, user-friendly explanation.

        Response Requirements:
        - Answer in the same language as the user's query.
        - Use a clear and structured format (e.g., bullet points, tables).
        - Do not provide SQL queries.
        - Keep the response within 500 words.
        """

        # Generate exploration response
        exploration_response = self.model_engine.call_helper(
            query=user_query,
            prompt=exploration_prompt)

        return exploration_response

    def _needs_explore(self, query):
        """Ask the LLM whether the query is a general data-exploration question.

        Returns the LLM's raw reply (expected to contain 'yes' or 'no').
        """
        prompt = f"""
        Task: Determine if the user is asking a **general exploratory question** about available **data, indicators, metrics, categories, or entities** in a database.
        ### **Exploratory Question Examples** (Answer 'yes' if similar):
        - "What metrics can I query?"
        - "What data is available?"
        - "What kinds of failures are recorded?"
        - "What types of vehicles exist?"
        - "What can I analyze in this dataset?"
        - "Tell me about the available fields in the database."

        ### **Non-Exploratory Questions** (Answer 'no' if similar):
        - "What is the failure rate of vehicle type A?"
        - "How many failures happened last month?"
        - "Show me the top 10 most frequent failure types."
        - "What is the average speed of all vehicles?"
        - "Retrieve data for vehicle ID 1234."

        **Response Format:**  
        - Reply with only `'yes'` if the query is exploratory.  
        - Reply with only `'no'` if the query is specific.  
        - No extra text, explanations, or formatting.

        User Query: {query}
        """
        response = self.model_engine.call_helper(query=query, prompt=prompt)
        return response

    def _finalize_analysis(self, results, user_query, visualization_path=None):
        """Produce an LLM analysis of the query results, optionally referencing a chart."""
        # Convert the result to a DataFrame for chart-type inference (best effort)
        df = None
        try:
            df = convert_sql_result_to_dataframe(results)
        except Exception:
            # Fixed: was a bare except. Analysis still works without a DataFrame.
            pass

        # Recommended chart type, used to enrich the analysis prompt
        chart_type = None
        if df is not None:
            recommendation = analyze_data_for_visualization(df, "", user_query)
            chart_type = recommendation["chart_type"]

        # Build the analysis prompt (user-facing text stays in Chinese)
        analysis_prompt = f"""
        根据用户的查询 "{user_query}"，分析以下SQL查询结果：
        {results}

        {'图表类型：' + chart_type if chart_type and chart_type != 'text' else ''}
        {'已根据数据生成可视化图表。' if visualization_path else ''}

        请提供：
        1. 数据的简明分析摘要
        2. 关键洞察和发现
        3. 数据模式或趋势的解释（如果明显）
        4. 对结果的业务含义的解读
        5. 可能的后续分析建议

        保持分析简洁、专业且有见解。
        """

        analysis_response = self.model_engine.call_llm(
            query=user_query,
            prompt=analysis_prompt)

        return analysis_response

    def _needs_clarification(self, query, generated_text):
        """Determine if we need more information from the user"""
        if "请提供具体信息:" in generated_text:
            return True, generated_text.split("请提供具体信息:")[1].strip()
        elif "引导提问:" in generated_text:
            return True, generated_text.split("引导提问:")[1].strip()
        return False, ""

    def _extract_sql_query(self, generated_text):
        """
        Extract SQL_QUERY from generated text.
        Handles cases where SQL is wrapped in Markdown or followed by extra content.
        """
        # Preferred path: an explicit SQL_QUERY marker
        if "SQL_QUERY:" in generated_text:
            raw_sql = generated_text.split("SQL_QUERY:")[1].strip()
            # Strip Markdown code-fence formatting
            clean_sql = re.sub(r"^```sql\s*|\s*```$", "", raw_sql, flags=re.MULTILINE).strip()

            # Keep only the text up to the first semicolon (drops trailing chatter)
            sql_match = re.search(r"^(.*?);", clean_sql, re.DOTALL)
            if sql_match:
                final_sql = sql_match.group(1).strip()
            else:
                final_sql = clean_sql.strip()

            # Ensure no trailing semicolon (per the Oracle convention used here)
            final_sql = final_sql.rstrip(";")

            return final_sql

        # Without a SQL_QUERY marker, try a fenced ```sql code block
        elif "```sql" in generated_text:
            return extract_sql_from_markdown(generated_text)

        return None

    def _validate_sql(self, query):
        """Enhanced SQL validation: reject writes and unknown table references."""
        # Read-only guard: reject any data/schema modifying statement
        dangerous_keywords = ['DROP', 'DELETE', 'TRUNCATE', 'ALTER', 'UPDATE', 'INSERT', 'GRANT', 'REVOKE']
        for keyword in dangerous_keywords:
            if re.search(r'\b' + keyword + r'\b', query.upper()):
                return False, f"SQL contains potentially dangerous keyword: {keyword}"

        # Collect table references. Oracle names here are schema-qualified
        # (e.g. BAYONET_BASICS.SYS_BAYONET); the old \w+ pattern captured only
        # the schema part, so valid tables could be reported as unknown.
        table_pattern = re.compile(r'FROM\s+([\w.]+)', re.IGNORECASE)
        join_pattern = re.compile(r'JOIN\s+([\w.]+)', re.IGNORECASE)

        tables_in_query = table_pattern.findall(query) + join_pattern.findall(query)

        for table in tables_in_query:
            # Accept the qualified name, its schema part (old behavior) or the
            # bare table name — whichever the schema dict happens to be keyed by.
            parts = table.split('.')
            if not any(name in self.schema_info for name in (table, parts[0], parts[-1])):
                return False, f"Query references unknown table: {table}"

        return True, "Valid query"

    def process_query(self, user_query):
        """Process a natural language query end-to-end and return the response text."""
        start_time = time.time()

        # Conversation history: reuse the last turn when the system has not
        # answered it yet, otherwise open a new turn.
        last_turn = self.conversation_history[-1] if self.conversation_history else None
        if last_turn is not None and 'user' in last_turn and 'system' not in last_turn:
            last_turn['user'] = user_query
        else:
            self.conversation_history.append({'user': user_query})

        # Exploratory questions ("what data is available?") bypass SQL generation.
        # Lower-cased so replies like "Yes" are recognized too.
        _is_exploration = self._needs_explore(user_query)
        if 'yes' in _is_exploration.lower():
            exploration_response = self._handle_exploration_query(user_query)
            self.conversation_history[-1]['system'] = exploration_response
            return exploration_response

        # Prepare prompt and generate a response (few-shot SQL generation)
        prompt = self._prepare_prompt(user_query)
        generated_text = self.model_engine.call_coder(query=user_query, prompt=prompt)

        # Check if we need more information
        needs_clarification, clarification_question = self._needs_clarification(user_query, generated_text)
        if needs_clarification:
            response = f"{clarification_question}"
            self.conversation_history[-1]['system'] = response
            return response

        # Extract and validate SQL_QUERY
        sql_query = self._extract_sql_query(generated_text)
        if not sql_query:
            response = "I couldn't generate a valid SQL_QUERY. Can you rephrase your question?"
            self.conversation_history[-1]['system'] = response
            return response

        is_valid, validation_message = self._validate_sql(sql_query)
        if not is_valid:
            response = f"Generated SQL_QUERY is not valid: {validation_message}"
            self.conversation_history[-1]['system'] = response
            return response

        # Execute query and return results
        results = self.db.execute_query(sql_query)

        # Generate a chart when the result looks chartable (multi-row list)
        visualization_path = None
        if isinstance(results, list) and len(results) > 1:
            visualization_path = visualize_sql_result(results, sql_query, user_query)

        # LLM analysis of the results
        analysis_response = self._finalize_analysis(results, user_query, visualization_path)

        # Format response
        response = f"Generated SQL: {sql_query}\n\nResults:\n{results}"

        # Mention the generated chart, if any
        if visualization_path:
            response += f"\n\n已生成可视化图表：{visualization_path}"

        # Append the analysis
        response += f"\n\nAnalysis:\n\n{analysis_response}"

        # Timing info
        total_time = time.time() - start_time
        response += f"\n\nProcessing time: {total_time:.2f}s"

        self.conversation_history[-1]['system'] = response
        return response

    def add_document(self, file_path: str) -> str:
        """
        Add a new document to the knowledge base

        Args:
            file_path: Path to the document file

        Returns:
            Result message
        """
        result = self.knowledge_base.add_document(file_path, self.doc_processor)

        self.knowledge_base.save()  # Update cache
        return result



