#!/usr/bin/env python
# -*- coding: utf-8 -*-

from pymilvus import MilvusClient
import pandas as pd
from tqdm import tqdm
import logging
import os
import sys
import requests
from typing import List, Optional, Dict, Any
import numpy as np
from dotenv import load_dotenv
load_dotenv()  # Load environment variables (e.g. XINFERENCE_SERVER_URL) from a .env file

# Add the parent directory to sys.path so the sibling `utils` package can be imported
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.embedding_config import EmbeddingProvider, EmbeddingConfig

# Configure logging: timestamped INFO-level messages to stderr
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Xinference嵌入类
class XinferenceEmbeddings:
    """使用Xinference服务进行文本嵌入的类"""
    
    def __init__(self, model_name: str, xinference_url: Optional[str] = None):
        """初始化Xinference嵌入处理器"""
        self.model_name = model_name
        # 直接使用model_name作为model_id
        self.model_id = model_name
        
        # 获取Xinference URL
        if xinference_url is None:
            self.base_url = os.environ.get("XINFERENCE_SERVER_URL", "http://10.128.10.186:9997")
        else:
            self.base_url = xinference_url
            
        if not self.base_url.endswith('/'):
            self.base_url += '/'
            
        logging.info(f"初始化Xinference嵌入，模型: {model_name}, 使用ID: {self.model_id}, 服务URL: {self.base_url}")
    
    def __call__(self, texts: List[str]) -> List[List[float]]:
        """向量化文本列表，兼容向量嵌入函数接口"""
        return self.embed_documents(texts)
    
    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """嵌入多个文档"""
        if not self.model_id:
            raise ValueError("模型ID未初始化")
        
        try:
            endpoint = f"{self.base_url}v1/embeddings"
            
            payload = {
                "model": self.model_id,
                "input": texts
            }
            
            response = requests.post(endpoint, json=payload)
            response.raise_for_status()
            result = response.json()
            
            # 提取嵌入向量
            if isinstance(result, dict) and "data" in result and len(result["data"]) > 0:
                embeddings = []
                for item in result["data"]:
                    if isinstance(item, dict) and "embedding" in item:
                        embeddings.append(item["embedding"])
                    else:
                        logging.warning(f"嵌入结果项格式不正确: {item}")
                        # 添加零向量替代
                        embeddings.append([0.0] * 1024)
                
                if embeddings:
                    return embeddings
                else:
                    raise ValueError(f"未能从响应中提取任何有效的嵌入向量")
            else:
                logging.error(f"嵌入结果中没有有效数据: {result}")
                raise ValueError(f"嵌入结果中没有有效数据: {result}")
        
        except Exception as e:
            logging.error(f"Xinference嵌入处理失败: {str(e)}")
            # 返回零向量作为后备
            if len(texts) > 0:
                # 返回1024维零向量
                return [[0.0] * 1024 for _ in range(len(texts))]
            return []

def create_schema_from_dataframe(df: pd.DataFrame, vector_dim: int) -> Dict:
    """Build a Milvus CollectionSchema from a DataFrame's columns.

    Args:
        df: Source data; its columns (beyond the standard ones) become VARCHAR
            fields sized from the longest observed value.
        vector_dim: Dimension of the FLOAT_VECTOR field.

    Returns:
        A pymilvus CollectionSchema with dynamic fields enabled.
    """
    from pymilvus import DataType, FieldSchema, CollectionSchema

    # Milvus caps VARCHAR max_length at 65535.
    VARCHAR_LIMIT = 65535

    # Base fields: auto-generated primary key plus the embedding vector.
    fields = [
        FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
        FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=vector_dim),
    ]

    # Standard fields so every record shares a consistent structure.
    standard_fields = {
        "term": 200,  # term name
        "category": 100,  # category
        "description": 2000,  # description
        "risk_level": 50,  # risk level
        "tips": 500,  # related advice
    }

    for field_name, max_length in standard_fields.items():
        fields.append(
            FieldSchema(name=field_name, dtype=DataType.VARCHAR, max_length=max_length)
        )

    # Dynamically add any remaining DataFrame columns as VARCHAR fields.
    for col_name in df.columns:
        # Skip fields that are already defined above.
        if col_name in ["id", "vector"] or col_name in standard_fields:
            continue

        # BUGFIX: .max() returns NaN on an empty frame (and max(NaN, 100)
        # stays NaN) and otherwise yields a numpy scalar; coerce to a plain
        # int, enforce a floor of 100, and cap at the Milvus VARCHAR limit.
        observed = df[col_name].astype(str).str.len().max()
        max_length = int(observed) if pd.notna(observed) else 0
        max_length = min(max(max_length, 100), VARCHAR_LIMIT)

        fields.append(
            FieldSchema(name=col_name, dtype=DataType.VARCHAR, max_length=max_length)
        )

    schema = CollectionSchema(fields,
                            "Finance Entities",
                            enable_dynamic_field=True)

    return schema

# Keyword tables used to auto-categorize finance terms (first match wins,
# in insertion order).
_TERM_CATEGORIES = {
    '投资': ['Fund', 'Investment', 'Stock', 'Bond', 'ETF', 'Return', 'Yield', 'Portfolio', 'Asset', 'Capital', 'Equity'],
    '银行': ['Bank', 'Deposit', 'Loan', 'Credit', 'Debit', 'Account', 'Branch', 'Overdraft', 'Check'],
    '保险': ['Insurance', 'Premium', 'Policy', 'Claim', 'Risk', 'Coverage', 'Underwriter', 'Actuarial'],
    '会计': ['Accounting', 'Audit', 'Balance', 'Sheet', 'Income', 'Statement', 'Cash', 'Flow', 'Depreciation'],
    '税务': ['Tax', 'Deduction', 'Credit', 'Exemption', 'Rate', 'Filing', 'Return', 'Relief'],
    '金融市场': ['Market', 'Exchange', 'Trade', 'Order', 'Bid', 'Ask', 'Spread', 'Volatility', 'Index'],
    '衍生品': ['Derivative', 'Option', 'Future', 'Swap', 'Forward', 'Contract', 'Call', 'Put'],
    '风险管理': ['Risk', 'Hedge', 'Exposure', 'Diversification', 'Protection', 'Management'],
    '经济': ['Economy', 'GDP', 'Inflation', 'Deflation', 'Growth', 'Recession', 'Monetary', 'Fiscal'],
    '金融科技': ['Fintech', 'Digital', 'Technology', 'Blockchain', 'Crypto', 'Bitcoin', 'Payment']
}

# Keyword tables mapping terms to a coarse risk level (first match wins).
_TERM_RISK_LEVELS = {
    '高风险': ['Option', 'Future', 'Derivative', 'Swap', 'Volatility', 'Hedge', 'Leverage', 'Margin', 'Speculative', 'Crypto', 'Bitcoin'],
    '中风险': ['Stock', 'Equity', 'Fund', 'ETF', 'Investment', 'Market', 'Growth', 'Yield', 'Bond'],
    '低风险': ['Deposit', 'Savings', 'CD', 'Treasury', 'Inflation', 'Fixed', 'Money Market', 'Cash', 'Insurance']
}


def _classify_term(term: str) -> str:
    """Return the first category whose keyword list matches *term*, else '其他'."""
    for cat, keywords in _TERM_CATEGORIES.items():
        if any(keyword.lower() in term.lower() for keyword in keywords):
            return cat
    return '其他'


def _assess_risk(term: str) -> str:
    """Return the first risk level whose keyword list matches *term*, else '中风险'."""
    for level, keywords in _TERM_RISK_LEVELS.items():
        if any(keyword.lower() in term.lower() for keyword in keywords):
            return level
    return '中风险'


def _describe_term(term: str, category: str) -> str:
    """Build a template-based description for *term* in *category*."""
    # (An LLM could generate these; a keyword-driven template is used for simplicity.)
    description = f"'{term}'是一个金融术语，属于{category}领域。"
    if '基金' in term or 'Fund' in term:
        description += "基金是一种集中投资者资金，由专业人士管理的集合投资工具。"
    elif '股票' in term or 'Stock' in term or 'Share' in term:
        description += "股票代表对公司所有权的一种凭证，持有者可以分享公司利润，并享有相应的权益。"
    elif '债券' in term or 'Bond' in term:
        description += "债券是一种债务证券，表示持有人借给发行人一定金额的债权凭证。"
    elif '保险' in term or 'Insurance' in term:
        description += "保险是一种风险管理工具，通过支付保费，将风险转移给保险公司的财务安排。"
    elif '银行' in term or 'Bank' in term:
        description += "银行是提供存款、贷款、汇款等金融服务的机构。"
    elif '投资' in term or 'Investment' in term:
        description += "投资是将资金投入到某个项目或资产中，期望未来获得收益的行为。"
    elif '风险' in term or 'Risk' in term:
        description += "风险指投资或金融活动中可能发生的损失或不确定性。"
    else:
        description += "这是金融市场中常见的术语，了解该术语有助于更好地理解金融活动和决策。"
    return description


def _risk_tips(risk_level: str) -> str:
    """Return investor advice text for the given risk level."""
    if risk_level == '高风险':
        return "该术语关联的金融活动风险较高，投资前请充分了解相关知识，评估自身风险承受能力，合理控制投资比例。"
    if risk_level == '中风险':
        return "该术语关联的金融活动有一定风险，建议在理解产品特性的基础上进行合理配置，注意风险分散。"
    return "该术语关联的金融活动风险相对较低，但仍需注意市场波动和流动性风险，做好财务规划。"


def enrich_finance_terms(df: pd.DataFrame) -> pd.DataFrame:
    """Enrich a bare finance-term list with category/risk/description/tips fields.

    Args:
        df: DataFrame of finance terms. A "bare" list is detected by having
            exactly two columns with the second named 'FINTERM'; the first
            column then holds the terms.

    Returns:
        A new enriched DataFrame, or *df* unchanged if it already looks enriched.
    """
    # Detect the simple two-column (term, FINTERM-marker) layout.
    if len(df.columns) == 2 and df.columns[1] == 'FINTERM':
        terms = df.iloc[:, 0].tolist()

        logging.info(f"开始为{len(terms)}个金融术语生成丰富信息...")

        enriched_data = []
        for term in tqdm(terms):
            category = _classify_term(term)
            risk_level = _assess_risk(term)
            # Key order matters: it determines the output column order.
            enriched_data.append({
                "term": term,
                "category": category,
                "risk_level": risk_level,
                "description": _describe_term(term, category),
                "tips": _risk_tips(risk_level),
            })

        return pd.DataFrame(enriched_data)
    else:
        # Already enriched; pass through untouched.
        logging.info("输入数据已经包含丰富信息，无需进一步处理")
        return df

def main():
    """Build a Milvus Lite vector database of finance terms embedded via Xinference."""
    # Embedding model served by Xinference.
    xinference_url = os.environ.get("XINFERENCE_SERVER_URL", "http://10.128.10.186:9997")
    logging.info(f"使用Xinference服务URL: {xinference_url}")
    embedding_function = XinferenceEmbeddings(model_name="bge-large", xinference_url=xinference_url)

    # Resolve paths relative to this file so the script works from any CWD.
    current_dir = os.path.dirname(os.path.abspath(__file__))
    parent_dir = os.path.dirname(current_dir)

    # Input CSV and output database file.
    file_path = os.path.join(parent_dir, "data", "finance.csv")
    db_path = os.path.join(parent_dir, "db", "finance_bge.db")

    logging.info(f"使用数据文件: {file_path}")
    logging.info(f"创建数据库: {db_path}")

    # Abort early if the data file is missing.
    if not os.path.exists(file_path):
        logging.error(f"数据文件不存在: {file_path}")
        sys.exit(1)

    # Ensure the db directory exists.
    os.makedirs(os.path.dirname(db_path), exist_ok=True)

    # Connect to Milvus (file-backed Milvus Lite).
    try:
        client = MilvusClient(db_path)
    except Exception as e:
        logging.error(f"连接到Milvus数据库失败: {str(e)}")
        # If the database file looks corrupt, delete it and retry once.
        if os.path.exists(db_path):
            try:
                os.remove(db_path)
                logging.info(f"已删除损坏的数据库文件: {db_path}")
                client = MilvusClient(db_path)
            except Exception as e2:
                logging.error(f"删除并重新创建数据库失败: {str(e2)}")
                sys.exit(1)
        else:
            # BUGFIX: previously execution fell through here with `client`
            # undefined, crashing later with a NameError.
            sys.exit(1)

    collection_name = "finance_entities"

    # Load the CSV, trying a few encodings/separators.
    logging.info("正在从CSV加载数据")
    try:
        # BUGFIX: the bare `except:` clauses would also swallow
        # SystemExit/KeyboardInterrupt; narrow them to Exception.
        try:
            df = pd.read_csv(file_path, encoding='utf-8')
        except Exception:
            try:
                df = pd.read_csv(file_path, encoding='latin1')
            except Exception:
                df = pd.read_csv(file_path, encoding='utf-8', sep=',', header=None)
                if len(df.columns) == 1:
                    df = pd.read_csv(file_path, encoding='utf-8', sep='\t', header=None)

        df = df.fillna("NA")
        logging.info(f"成功加载数据，共 {len(df)} 条记录")

        # Inspect column names.
        logging.info(f"数据列名: {df.columns.tolist()}")

        # Enrich bare term lists with category/risk/description/tips fields.
        df = enrich_finance_terms(df)
        logging.info(f"数据丰富后的列名: {df.columns.tolist()}")

    except Exception as e:
        logging.error(f"加载CSV文件失败: {str(e)}")
        sys.exit(1)

    # Probe the embedding dimension with a sample document.
    sample_doc = "Sample Text"
    sample_embedding = embedding_function([sample_doc])[0]
    vector_dim = len(sample_embedding)
    logging.info(f"向量维度: {vector_dim}")

    # Build the collection schema from the DataFrame.
    schema = create_schema_from_dataframe(df, vector_dim)

    # Drop any pre-existing collection of the same name.
    try:
        if client.has_collection(collection_name):
            client.drop_collection(collection_name)
            logging.info(f"已删除现有集合: {collection_name}")
    except Exception as e:
        logging.warning(f"尝试删除现有集合时出错: {str(e)}")

    # Create the collection.
    client.create_collection(
        collection_name=collection_name,
        schema=schema,
    )
    logging.info(f"已创建新集合: {collection_name}")

    # Create a vector index: AUTOINDEX lets Milvus pick the index type;
    # COSINE is the similarity metric; nlist tunes cluster count
    # (higher = more accurate but slower).
    index_params = client.prepare_index_params()
    index_params.add_index(
        field_name="vector",
        index_type="AUTOINDEX",
        metric_type="COSINE",
        params={"nlist": 1024}
    )

    client.create_index(
        collection_name=collection_name,
        index_params=index_params
    )
    logging.info(f"已为集合创建索引: {collection_name}")

    # Pick the text column used as the embedding source: prefer 'term'.
    embedding_field = "term" if "term" in df.columns else df.columns[0]
    logging.info(f"使用 '{embedding_field}' 字段作为嵌入文本源")

    def get_embedding_text(row):
        """Combine term, description and category into one string for embedding."""
        text = str(row[embedding_field])
        if "description" in row and not pd.isna(row["description"]):
            text += " - " + str(row["description"])
        if "category" in row and not pd.isna(row["category"]):
            text += " - 分类: " + str(row["category"])
        return text

    # Insert in small batches to bound memory usage.
    batch_size = 128
    total_processed = 0

    for start_idx in tqdm(range(0, len(df), batch_size), desc="处理批次"):
        end_idx = min(start_idx + batch_size, len(df))
        batch_df = df.iloc[start_idx:end_idx]

        # Combine term and description for better semantic embeddings.
        if "description" in batch_df.columns:
            docs = [get_embedding_text(row) for _, row in batch_df.iterrows()]
        else:
            docs = [str(row[embedding_field]) for _, row in batch_df.iterrows()]

        # Generate embeddings for this batch; skip the batch on failure.
        try:
            embeddings = embedding_function(docs)
            logging.info(f"已生成批次 {start_idx // batch_size + 1} 的嵌入向量")
        except Exception as e:
            logging.error(f"为批次 {start_idx // batch_size + 1} 生成嵌入向量时出错: {e}")
            continue

        # Assemble entity rows: vector plus all stringified columns.
        data = []
        for idx, (_, row) in enumerate(batch_df.iterrows()):
            entity_data = {"vector": embeddings[idx]}
            for col in df.columns:
                entity_data[col] = str(row[col])
            data.append(entity_data)

        # Insert the batch (unused insert result no longer bound).
        try:
            client.insert(
                collection_name=collection_name,
                data=data
            )
            total_processed += len(data)
            logging.info(f"插入批次 {start_idx // batch_size + 1}，已插入 {len(data)} 条记录，总处理: {total_processed}")
        except Exception as e:
            logging.error(f"插入批次 {start_idx // batch_size + 1} 时出错: {e}")

    logging.info("插入过程已完成。")
    logging.info(f"总共插入记录数: {total_processed} / {len(df)}")

    # Smoke-test the collection with a similarity search on the first term.
    try:
        query = df[embedding_field].iloc[0]
        logging.info(f"使用查询测试: '{query}'")
        query_embeddings = embedding_function([query])

        # Search for the highest cosine-similarity matches.
        search_result = client.search(
            collection_name=collection_name,
            data=[query_embeddings[0]],
            limit=5,
            output_fields=[embedding_field, "description", "category", "risk_level", "tips"]
        )
        logging.info(f"'{query}' 的搜索结果:")
        for item in search_result[0]:
            logging.info(f"- {item['entity'][embedding_field]}: {item['distance']}")
            if "description" in item["entity"]:
                logging.info(f"  描述: {item['entity']['description'][:100]}...")

        logging.info("数据库创建完成并已测试，可以正常使用了！")
    except Exception as e:
        logging.error(f"测试查询时出错: {e}")
        logging.info("数据库创建完成，但测试查询失败，请检查日志。")

if __name__ == "__main__":
    main()