import cx_Oracle
import pandas as pd
from typing import List, Dict
from vanna.remote import VannaDefault
from vanna.openai.openai_chat import OpenAI_Chat
from vanna.milvus.milvus_vector import Milvus_VectorStore
from pymilvus import MilvusClient
from openai import OpenAI
import os
import numpy as np
import dashscope
from dashscope import TextEmbedding
from flask import Flask, jsonify, Response, request, redirect, url_for
import flask
from cache import MemoryCache
from functools import wraps
import json
# Flask app; static files are served from the package static folder at the root path.
app = Flask(__name__, static_url_path='')
vn = None  # global Vanna instance, populated by init_app()/main()
# SETUP: in-memory per-question cache shared by the API routes below
cache = MemoryCache()

# 1. Embedding adapter: wraps DashScope's text-embedding API for Milvus.
class DashScopeEmbedding:
    def __init__(self):
        self.api_key = "sk-bf0b69213b7c4aa09c3e912a9cf07a86"
        dashscope.api_key = self.api_key
        self.dimension = 1536  # text-embedding-v2 的输出维度
        
    def encode_documents(self, texts):
        if isinstance(texts, str):
            texts = [texts]
            
        embeddings = []
        for text in texts:
            try:
                resp = TextEmbedding.call(
                    model='text-embedding-v2',
                    input=text,
                    api_key=self.api_key
                )
                if resp.status_code == 200:
                    # 处理字典格式的 embedding
                    embedding_data = resp.output['embeddings'][0]
                    if isinstance(embedding_data, dict):
                        # 如果是字典，尝试获取 embedding 值
                        if 'embedding' in embedding_data:
                            embedding = np.array(embedding_data['embedding'], dtype=np.float32)
                        else:
                            print(f"Embedding dict keys: {embedding_data.keys()}")
                            embedding = np.zeros(self.dimension, dtype=np.float32)
                    else:
                        embedding = np.array(embedding_data, dtype=np.float32)
                    
                    embeddings.append(embedding)
                else:
                    print(f"Error getting embedding: {resp.code} - {resp.message}")
                    embeddings.append(np.zeros(self.dimension, dtype=np.float32))
            except Exception as e:
                print(f"Exception during embedding: {str(e)}")
                print(f"Response content: {resp.output if 'resp' in locals() else 'No response'}")
                embeddings.append(np.zeros(self.dimension, dtype=np.float32))
        
        # 确保所有向量都�����确的形状
        embeddings = [e.reshape(self.dimension) if e.size == self.dimension else np.zeros(self.dimension, dtype=np.float32) for e in embeddings]
        return np.vstack(embeddings)
    
    def encode_queries(self, texts):
        return self.encode_documents(texts)
        
    def __call__(self, text):
        embeddings = self.encode_documents(text)
        return embeddings if len(embeddings.shape) > 1 else embeddings[0]

# 2. OpenAI-compatible client initialization (DashScope compatible-mode endpoint).
def init_openai_client():
    """Create the OpenAI-compatible client pointed at DashScope.

    Returns:
        OpenAI: a client configured for DashScope's compatible-mode base URL.
    """
    # SECURITY: prefer the environment variable over the embedded key; the
    # fallback keeps existing deployments working until the key is rotated.
    return OpenAI(
        api_key=os.environ.get("DASHSCOPE_API_KEY", "sk-bf0b69213b7c4aa09c3e912a9cf07a86"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        default_headers={"x-foo": "true"},
    )

# Module-level LLM client shared by MyVanna and all chat calls.
client = init_openai_client()

# 3. MyVanna: Milvus vector store + OpenAI-style chat combined into one Vanna.
class MyVanna(Milvus_VectorStore, OpenAI_Chat):
    """Vanna backed by a Milvus vector store and an OpenAI-compatible chat model."""

    def __init__(self, client=None, config=None):
        config = config or {}
        # Wire the custom DashScope embedder and a Milvus client into the
        # store configuration before initializing either base class.
        config['embedding_function'] = DashScopeEmbedding()
        milvus_uri = config.get("milvus_uri", "http://localhost:19530")
        config['milvus_client'] = MilvusClient(uri=milvus_uri)
        Milvus_VectorStore.__init__(self, config=config)
        OpenAI_Chat.__init__(self, client=client, config=config)

# 4. Vanna initialization helper.
def init_vanna(username: str, password: str, host: str, port: str, service_name: str):
    """Create a Milvus-backed Vanna instance and connect it to Oracle.

    Args:
        username: Oracle schema user.
        password: Oracle password.
        host: database host.
        port: database listener port.
        service_name: Oracle service name.

    Returns:
        The configured, database-connected Vanna instance.
    """
    instance = MyVanna(
        client=client,
        config={
            "model": "qwen2.5-7b-instruct",
            "milvus_uri": "http://192.168.2.110:19530",  # plain-HTTP Milvus endpoint
            "collection_name": "sql_vectors",            # target collection
            "use_local_models": False,                   # never download local models
        },
    )
    instance.max_tokens = 800
    instance.temperature = 0.5
    oracle_dsn = cx_Oracle.makedsn(host, port, service_name=service_name)
    instance.connect_to_oracle(dsn=oracle_dsn, user=username, password=password)
    return instance

def train_vanna_with_ddl(vn, ddl_list: List[Dict]) -> List[str]:
    """Train Vanna on a batch of DDL statements.

    Args:
        vn: the Vanna instance to train.
        ddl_list: dicts each carrying a 'ddl' statement and a 'description'.

    Returns:
        List[str]: training-record IDs for the statements that succeeded;
        failures are logged and skipped.
    """
    training_ids = []
    for entry in ddl_list:
        try:
            # DDL-only training: no SQL/question is required.
            training_id = vn.train(ddl=entry['ddl'])
            training_ids.append(training_id)
            print(f"成功训练 DDL，训练 ID: {training_id}")
            print(f"DDL: {entry['ddl'][:100]}...")  # truncate long statements
            print(f"描述: {entry['description']}")
            print("-" * 80)
        except Exception as e:
            print(f"训练失败: {str(e)}")
            print(f"DDL: {entry['ddl'][:100]}...")
            print("-" * 80)

    return training_ids

def connect_to_oracle(username: str, password: str, host: str, port: str, service_name: str) -> cx_Oracle.Connection:
    """Open and return a cx_Oracle connection to the given Oracle service."""
    return cx_Oracle.connect(
        username,
        password,
        cx_Oracle.makedsn(host, port, service_name=service_name),
    )

def get_table_metadata(connection: cx_Oracle.Connection) -> pd.DataFrame:
    """Fetch table names and comments for the connected schema.

    Backup tables are filtered out: names containing BAK, ending in a
    digit, or containing an 8-digit (date-like) run.

    Returns:
        pd.DataFrame with OWNER, TABLE_NAME, COMMENTS columns.
    """
    # Restrict results to the schema we are connected as.
    owner = connection.username.upper()

    query = """
    SELECT 
        t.owner as OWNER,
        t.table_name as TABLE_NAME,
        t.comments as COMMENTS
    FROM all_tab_comments t
    WHERE t.owner = :owner
    AND t.table_type = 'TABLE' 
    AND t.table_name NOT LIKE '%BAK%'  -- 排除包含BAK的表
    AND NOT REGEXP_LIKE(t.table_name, '[0-9]$')  -- 排除以数字结尾的表
    AND NOT REGEXP_LIKE(t.table_name, '[0-9]{8}')  -- 排除包含8位数字(日期)的表
    """

    return pd.read_sql(query, connection, params={'owner': owner})

def get_column_metadata(connection: cx_Oracle.Connection) -> pd.DataFrame:
    """Fetch column metadata (name, type, length, nullability, comment).

    Applies the same backup-table filters as get_table_metadata and orders
    rows by owner, table and column position.

    Returns:
        pd.DataFrame with one row per column of each retained table.
    """
    # Restrict results to the schema we are connected as.
    owner = connection.username.upper()

    query = """
    SELECT 
        c.owner as OWNER,
        c.table_name as TABLE_NAME,
        c.column_name as COLUMN_NAME,
        c.data_type as DATA_TYPE,
        c.data_length as DATA_LENGTH,
        c.nullable as NULLABLE,
        cc.comments as COMMENTS
    FROM all_tab_columns c
    LEFT JOIN all_col_comments cc 
        ON c.owner = cc.owner 
        AND c.table_name = cc.table_name 
        AND c.column_name = cc.column_name
    WHERE c.owner = :owner
    AND c.table_name NOT LIKE '%BAK%'  -- 排除包含BAK的表
    AND NOT REGEXP_LIKE(c.table_name, '[0-9]$')  -- 排除以数字结尾的表
    AND NOT REGEXP_LIKE(c.table_name, '[0-9]{8}')  -- 排除包含8位数字(日期)的表
    ORDER BY c.owner, c.table_name, c.column_id
    """

    return pd.read_sql(query, connection, params={'owner': owner})

def generate_ddl_statements(tables_df: pd.DataFrame, columns_df: pd.DataFrame) -> List[Dict]:
    """Build CREATE TABLE DDL (plus COMMENT ON statements) for Vanna training.

    Args:
        tables_df: table metadata (OWNER, TABLE_NAME, COMMENTS).
        columns_df: column metadata matched to tables by owner/table name.

    Returns:
        List[Dict]: one {'ddl', 'description'} entry per table.
    """
    ddl_list = []

    for _, table in tables_df.iterrows():
        owner, name = table['OWNER'], table['TABLE_NAME']
        cols = columns_df[
            (columns_df['OWNER'] == owner) &
            (columns_df['TABLE_NAME'] == name)
        ]

        col_defs = []
        for _, col in cols.iterrows():
            parts = [f"    {col['COLUMN_NAME']} {col['DATA_TYPE']}"]
            # Only character types carry an explicit length.
            if col['DATA_TYPE'] in ('VARCHAR2', 'CHAR'):
                parts.append(f"({col['DATA_LENGTH']})")
            if col['NULLABLE'] == 'N':
                parts.append(" NOT NULL")
            if col['COMMENTS']:
                parts.append(f" -- {col['COMMENTS']}")
            col_defs.append("".join(parts))

        ddl = f"CREATE TABLE {owner}.{name} (\n" + ",\n".join(col_defs) + "\n)"

        # Append a COMMENT ON statement when the table is documented.
        if table['COMMENTS']:
            ddl += f"\n\nCOMMENT ON TABLE {owner}.{name} IS '{table['COMMENTS']}'"

        ddl_list.append({
            'ddl': ddl,
            'description': f"表 {owner}.{name} 的结构定义。"
            f"该表{table['COMMENTS'] if table['COMMENTS'] else '未提供描述'}。"
        })

    return ddl_list

def generate_example_queries(tables_df: pd.DataFrame, columns_df: pd.DataFrame) -> List[Dict]:
    """Generate one annotated SELECT-all example query per table.

    Args:
        tables_df: table metadata (OWNER, TABLE_NAME, COMMENTS).
        columns_df: column metadata matched to tables by owner/table name.

    Returns:
        List[Dict]: entries with 'question', 'sql' and 'documentation'.
    """
    query_list = []

    for _, table in tables_df.iterrows():
        owner, name = table['OWNER'], table['TABLE_NAME']
        cols = columns_df[
            (columns_df['OWNER'] == owner) &
            (columns_df['TABLE_NAME'] == name)
        ]

        # Natural-language label: table comment when present, else the raw name.
        table_description = f"查询{table['COMMENTS'] if table['COMMENTS'] else name}"

        select_lines = []
        total = len(cols)
        for idx, (_, col) in enumerate(cols.iterrows()):
            line = f"    {col['COLUMN_NAME']}"
            # No trailing comma on the last column.
            if idx < total - 1:
                line += ","
            # Inline the column comment when available.
            if col['COMMENTS']:
                line += f" -- {col['COMMENTS']}"
            select_lines.append(line)

        sql = (
            f"-- {table_description}\nSELECT\n"
            + "\n".join(select_lines)
            + f"\nFROM {owner}.{name}"
        )

        field_summary = ', '.join(
            f"{col['COLUMN_NAME']}({col['COMMENTS'] if col['COMMENTS'] else '无说明'})"
            for _, col in cols.iterrows()
        )
        query_list.append({
            'question': table_description,
            'sql': sql,
            'documentation': f"""
这个查询返回{table['COMMENTS'] if table['COMMENTS'] else name}的所有信息。
包含以下字段：
{field_summary}
"""
        })

    return query_list

def train_vanna_with_queries(vn, query_list: List[Dict]) -> List[str]:
    """Train Vanna on generated example queries.

    The question text is concatenated with its documentation so the stored
    record carries the field descriptions as well.

    Returns:
        List[str]: IDs of the successfully trained records; failures are
        logged and skipped.
    """
    training_ids = []
    for info in query_list:
        try:
            training_id = vn.train(
                question=info['question'] + info['documentation'],
                sql=info['sql']
            )
            training_ids.append(training_id)
            print(f"成功训练查询，训练 ID: {training_id}")
            print(f"问题: {info['question']}{info['documentation']}")
            print(f"SQL: \n{info['sql']}")
            print("-" * 80)
        except Exception as e:
            print(f"训练失败: {str(e)}")
            print(f"SQL: {info['sql'][:100]}...")
            print("-" * 80)

    return training_ids

def init_app(prepare_only: bool = False, train_data: bool = False, config: dict = None):
    """Initialize the global Vanna instance and optionally train it.

    Args:
        prepare_only: only generate and save training data, skipping training.
        train_data: run DDL + example-query training against the database.
        config: Oracle connection settings forwarded to init_vanna and
            connect_to_oracle.

    Returns:
        The initialized (and possibly trained) Vanna instance.

    Raises:
        Re-raises any initialization/training error after logging it.
    """
    global vn
    config = config or {}

    connection = None
    try:
        vn = init_vanna(**config)

        if prepare_only or train_data:
            connection = connect_to_oracle(**config)

            # Pull schema metadata for DDL/query generation.
            tables_df = get_table_metadata(connection)
            columns_df = get_column_metadata(connection)
            print(f"获取到 {len(tables_df)} 个表的元数据")

            if prepare_only:
                # Persist the generated material without training.
                training_data = prepare_training_data(tables_df, columns_df)
                save_training_data(training_data, output_file='training_data.json')
                print("训练数据准备完成，跳过训练阶段")
                return vn

            # 1. Train table structures (DDL) first.
            print("开始训练表结构(DDL)...")
            ddl_list = generate_ddl_statements(tables_df, columns_df)
            print(f"生成了 {len(ddl_list)} 个 DDL 语句")
            ddl_training_ids = train_vanna_with_ddl(vn, ddl_list)
            print(f"DDL训练完成，共训练 {len(ddl_training_ids)} 个表结构")

            # 2. Then train the example queries.
            print("\n开始训练示例查询...")
            query_list = generate_example_queries(tables_df, columns_df)
            print(f"生成了 {len(query_list)} 个查询语句")
            query_training_ids = train_vanna_with_queries(vn, query_list)
            print(f"查询训练完成，共训练 {len(query_training_ids)} 个查询")

            # Summarize what landed in the vector store.
            training_data = vn.get_training_data()
            print("\n训练总结:")
            print(f"成功训练 {len(ddl_training_ids)} 个表结构")
            print(f"成功训练 {len(query_training_ids)} 个示例查询")
            # BUG FIX: the vector store is Milvus, not ChromaDB.
            print(f"Milvus 中总共有 {len(training_data)} 条训练数据")
        else:
            print("跳过数据训练阶段")

        return vn

    except Exception as e:
        print(f"初始化错误: {str(e)}")
        import traceback
        traceback.print_exc()
        raise
    finally:
        # Always release the Oracle connection; the prepare_only early
        # return previously leaked it.
        if connection is not None:
            connection.close()

def prepare_training_data(tables_df: pd.DataFrame, columns_df: pd.DataFrame) -> dict:
    """Assemble (but do not run) the DDL and query training material.

    Returns:
        dict: {'ddl_data': [...], 'query_data': [...]} ready to be saved.
    """
    return {
        'ddl_data': generate_ddl_statements(tables_df, columns_df),
        'query_data': generate_example_queries(tables_df, columns_df),
    }

def save_training_data(training_data: dict, output_file: str = 'training_data.json'):
    """Write the prepared training data to *output_file* as pretty-printed JSON."""
    with open(output_file, 'w', encoding='utf-8') as fp:
        # Keep CJK text readable in the output file.
        json.dump(training_data, fp, ensure_ascii=False, indent=2)
    print(f"训练数据已保存到: {output_file}")

def main():
    """Entry point: initialize Vanna, train against Oracle, then serve the API."""
    global vn
    try:
        # SECURITY: credentials belong in the environment, not in source.
        # The literal fallbacks preserve the previous behavior; remove them
        # (and rotate the password) as soon as deployment allows.
        config = {
            'username': os.environ.get('ORACLE_USER', 'grsjzx'),
            'password': os.environ.get('ORACLE_PASSWORD', 'Hzsun.com!2'),
            'host': os.environ.get('ORACLE_HOST', '192.168.2.15'),
            'port': os.environ.get('ORACLE_PORT', '1521'),
            'service_name': os.environ.get('ORACLE_SERVICE', 'ORCLPDB1'),
        }
        # Initialize the app with training enabled.
        vn = init_app(prepare_only=False, train_data=True, config=config)
        # Start the Flask service.
        app.run(host='0.0.0.0', port=18888, debug=False)
    except Exception as e:
        print(f"运行错误: {str(e)}")
        import traceback
        traceback.print_exc()

# NO NEED TO CHANGE ANYTHING BELOW THIS LINE
def requires_cache(fields):
    """Decorator factory: load the given cache *fields* for the request's id.

    The wrapped view receives the cached values (plus 'id') as keyword
    arguments; a missing id or missing field short-circuits into a JSON
    error response.
    """
    def decorator(f):
        @wraps(f)
        def decorated(*args, **kwargs):
            # Renamed from `id` to avoid shadowing the builtin.
            cache_id = request.args.get('id')

            if cache_id is None:
                return jsonify({"type": "error", "error": "No id provided"})

            for field in fields:
                if cache.get(id=cache_id, field=field) is None:
                    return jsonify({"type": "error", "error": f"No {field} found"})

            field_values = {field: cache.get(id=cache_id, field=field) for field in fields}
            field_values['id'] = cache_id  # views also receive the id itself

            return f(*args, **field_values, **kwargs)
        return decorated
    return decorator

@app.route('/api/v0/generate_questions', methods=['GET'])
def generate_questions():
    """Return a list of suggested questions from the model."""
    questions = vn.generate_questions()
    return jsonify({
        "type": "question_list",
        "questions": questions,
        "header": "Here are some questions you can ask:"
    })

@app.route('/api/v0/generate_sql', methods=['GET'])
def generate_sql():
    """Generate SQL for ?question=... and cache question + SQL under a new id."""
    question = flask.request.args.get('question')
    if question is None:
        return jsonify({"type": "error", "error": "No question provided"})

    new_id = cache.generate_id(question=question)
    sql = vn.generate_sql(question=question)

    cache.set(id=new_id, field='question', value=question)
    cache.set(id=new_id, field='sql', value=sql)

    return jsonify({
        "type": "sql",
        "id": new_id,
        "text": sql,
    })

@app.route('/api/v0/run_sql', methods=['GET'])
@requires_cache(['sql'])
def run_sql(id: str, sql: str):
    """Execute the cached SQL, cache the DataFrame, return the first 10 rows."""
    try:
        df = vn.run_sql(sql=sql)
        cache.set(id=id, field='df', value=df)
        return jsonify({
            "type": "df",
            "id": id,
            "df": df.head(10).to_json(orient='records'),
        })
    except Exception as e:
        return jsonify({"type": "error", "error": str(e)})

@app.route('/api/v0/download_csv', methods=['GET'])
@requires_cache(['df'])
def download_csv(id: str, df):
    """Stream the cached DataFrame as a CSV attachment named after its id."""
    headers = {"Content-disposition": f"attachment; filename={id}.csv"}
    return Response(df.to_csv(), mimetype="text/csv", headers=headers)

@app.route('/api/v0/generate_plotly_figure', methods=['GET'])
@requires_cache(['df', 'question', 'sql'])
def generate_plotly_figure(id: str, df, question, sql):
    """Generate Plotly code from the cached query context and render it."""
    try:
        code = vn.generate_plotly_code(
            question=question,
            sql=sql,
            df_metadata=f"Running df.dtypes gives:\n {df.dtypes}",
        )
        fig = vn.get_plotly_figure(plotly_code=code, df=df, dark_mode=False)
        fig_json = fig.to_json()

        cache.set(id=id, field='fig_json', value=fig_json)

        return jsonify({
            "type": "plotly_figure",
            "id": id,
            "fig": fig_json,
        })
    except Exception as e:
        # Log the stack trace server-side before returning the error payload.
        import traceback
        traceback.print_exc()
        return jsonify({"type": "error", "error": str(e)})

@app.route('/api/v0/get_training_data', methods=['GET'])
def get_training_data():
    """Return the first 25 training records as a JSON-encoded DataFrame."""
    df = vn.get_training_data()
    return jsonify({
        "type": "df",
        "id": "training_data",
        "df": df.head(25).to_json(orient='records'),
    })

@app.route('/api/v0/remove_training_data', methods=['POST'])
def remove_training_data():
    """Delete one training record identified by the id in the JSON body."""
    record_id = flask.request.json.get('id')
    if record_id is None:
        return jsonify({"type": "error", "error": "No id provided"})

    if vn.remove_training_data(id=record_id):
        return jsonify({"success": True})
    return jsonify({"type": "error", "error": "Couldn't remove training data"})

@app.route('/api/v0/train', methods=['POST'])
def add_training_data():
    """Add a training record (question/sql/ddl/documentation) from the JSON body."""
    payload = flask.request.json
    try:
        new_id = vn.train(
            question=payload.get('question'),
            sql=payload.get('sql'),
            ddl=payload.get('ddl'),
            documentation=payload.get('documentation'),
        )
        return jsonify({"id": new_id})
    except Exception as e:
        print("TRAINING ERROR", e)
        return jsonify({"type": "error", "error": str(e)})

@app.route('/api/v0/generate_followup_questions', methods=['GET'])
@requires_cache(['df', 'question', 'sql'])
def generate_followup_questions(id: str, df, question, sql):
    """Suggest follow-up questions based on the cached question/SQL/result."""
    followup_questions = vn.generate_followup_questions(question=question, sql=sql, df=df)
    cache.set(id=id, field='followup_questions', value=followup_questions)
    return jsonify({
        "type": "question_list",
        "id": id,
        "questions": followup_questions,
        "header": "Here are some followup questions you can ask:"
    })

@app.route('/api/v0/load_question', methods=['GET'])
@requires_cache(['question', 'sql', 'df', 'fig_json', 'followup_questions'])
def load_question(id: str, question, sql, df, fig_json, followup_questions):
    """Return every cached artifact for a question in one payload."""
    try:
        return jsonify({
            "type": "question_cache",
            "id": id,
            "question": question,
            "sql": sql,
            "df": df.head(10).to_json(orient='records'),
            "fig": fig_json,
            "followup_questions": followup_questions,
        })
    except Exception as e:
        return jsonify({"type": "error", "error": str(e)})

@app.route('/api/v0/get_question_history', methods=['GET'])
def get_question_history():
    """List the questions currently held in the cache."""
    questions = cache.get_all(field_list=['question'])
    return jsonify({"type": "question_history", "questions": questions})

@app.route('/')
def root():
    """Serve the single-page app entry point."""
    return app.send_static_file('index.html')



# Script entry point: train (if configured) and start the HTTP API server.
if __name__ == "__main__":
    main()
