import logging
import os
import re
from typing import Dict, List

import pandas as pd
import tqdm
from dotenv import load_dotenv
from pymilvus import CollectionSchema, DataType, FieldSchema, MilvusClient
from sentence_transformers import SentenceTransformer

from utils.embedding_config import EmbeddingConfig, EmbeddingProvider
from utils.embedding_factory import EmbeddingFactory

# Configure module-level logging (INFO level by default).
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Load environment variables (e.g. DATA_PATH) from a local .env file.
load_dotenv()


class StdService:
    """
    Financial-term standardization service.

    Embeds financial terms with a configurable embedding function and
    stores/searches them in a Milvus vector collection for similarity-based
    term standardization.
    """

    def __init__(
        self,
        provider="huggingface",
        model="./bge-m3/",
        db_path="http://localhost:19530",
        collection_name="financial_concepts",
    ):
        """
        Initialize the standardization service.

        Args:
            provider: Embedding provider name. Currently only "huggingface"
                is supported (see ``provider_mapping`` below).
            model: Model name or local path passed to the embedding factory.
            db_path: Milvus server URI (or local DB path).
            collection_name: Name of the Milvus collection to use.

        Raises:
            ValueError: If ``provider`` is not a supported provider name.
        """
        # Map the provider string to the corresponding enum value.
        provider_mapping = {
            "huggingface": EmbeddingProvider.HUGGINGFACE,
        }

        embedding_provider = provider_mapping.get(provider.lower())
        if embedding_provider is None:
            raise ValueError(f"不支持的提供商: {provider}")

        config = EmbeddingConfig(provider=embedding_provider, model_name=model)
        self.embedding_func = EmbeddingFactory.create_embedding_function(config)

        # Connect to Milvus.
        self.client = MilvusClient(db_path)
        self.collection_name = collection_name

        # Try to load an existing collection; on failure, drop it and rebuild.
        if self.client.has_collection(collection_name):
            try:
                self.client.load_collection(collection_name)
                logger.info(f"成功加载现有集合: {collection_name}")
            except Exception as e:
                logger.warning(f"加载集合失败，将清理并重建: {str(e)}")
                try:
                    self.client.release_collection(collection_name)
                    self.client.drop_collection(collection_name)
                # Separate name so the original load error is not shadowed.
                except Exception as cleanup_err:
                    logger.warning(f"清理旧集合失败: {str(cleanup_err)}")
                logger.warning(f"创建新集合: {collection_name}")
                self._create_collection()
                self.client.load_collection(self.collection_name)
        else:
            logger.warning(f"创建新集合: {collection_name}")
            self._create_collection()
            self.client.load_collection(self.collection_name)

        logger.info("初始化完成")

    def _create_collection(self):
        """Create the collection, build its vector index, and bulk-load data.

        Reads term rows from the CSV at ``DATA_PATH`` (two columns:
        concept_name, term_type; no header), embeds each concept name in
        batches, and inserts the vectors into Milvus.
        """
        # os.path.join keeps the default path portable (the original
        # hard-coded a Windows-style backslash separator).
        file_path = os.getenv(
            "DATA_PATH", os.path.join("data", "万条金融标准术语.csv")
        )

        logger.info("Loading data from CSV")
        df = pd.read_csv(
            file_path,
            names=["concept_name", "term_type"],
            dtype=str,
            header=None,
            low_memory=False,
        ).fillna("NA")

        # Probe the embedding dimension with a sample document.
        sample_embedding = self.embedding_func.embed_documents(["Sample Text"])[0]
        vector_dim = len(sample_embedding)

        # Collection schema (simplified to match the two-column CSV).
        fields = [
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=vector_dim),
            FieldSchema(name="concept_name", dtype=DataType.VARCHAR, max_length=200),
            FieldSchema(name="term_type", dtype=DataType.VARCHAR, max_length=20),
        ]
        schema = CollectionSchema(fields, description="金融术语标准化集合")

        self.client.create_collection(
            collection_name=self.collection_name, schema=schema
        )

        # Build a cosine-similarity index on the vector field.
        index_params = self.client.prepare_index_params()
        index_params.add_index(
            field_name="vector",
            metric_type="COSINE",
        )
        self.client.create_index(
            collection_name=self.collection_name, index_params=index_params
        )

        # Embed and insert in batches.
        batch_size = 1024

        for start_idx in tqdm.tqdm(
            range(0, len(df), batch_size), desc="Processing batches"
        ):
            end_idx = min(start_idx + batch_size, len(df))
            batch_df = df.iloc[start_idx:end_idx]
            batch_no = start_idx // batch_size + 1

            # One document per row; currently just the concept name
            # (dtype=str + fillna above guarantee string values).
            docs = [row["concept_name"] for _, row in batch_df.iterrows()]

            try:
                embeddings = self.embedding_func.embed_documents(docs)
                logger.info(f"Generated embeddings for batch {batch_no}")
            except Exception as e:
                logger.error(
                    f"Error generating embeddings for batch {batch_no}: {e}"
                )
                continue

            data = [
                {
                    "vector": embeddings[idx],
                    "concept_name": str(row["concept_name"]),
                    "term_type": str(row.get("term_type", "NA")),
                }
                for idx, (_, row) in enumerate(batch_df.iterrows())
            ]

            # Each batch inserts up to 1024 vector entries (standard concepts).
            try:
                res = self.client.insert(
                    collection_name=self.collection_name, data=data
                )
                logger.info(f"Inserted batch {batch_no}, result: {res}")
            except Exception as e:
                logger.error(f"Error inserting batch {batch_no}: {e}")

        logger.info("Insert process completed.")

    def standardize_terms(self, text: str, term_types: Dict[str, str]) -> List[Dict]:
        """
        Standardize the financial terms found in ``text``.

        Args:
            text: Input text to scan for candidate terms.
            term_types: Term-type filter. NOTE(review): currently unused —
                every candidate is labeled FINTERM; parameter kept for
                interface compatibility.

        Returns:
            One dict per candidate term with keys ``original_term``,
            ``entity_group`` and ``standardized_results``.
        """
        potential_terms = self._extract_terms(text)
        if not potential_terms:
            return []

        # Debug logging instead of a stray print().
        logger.debug(f"Extracted candidate terms: {potential_terms}")

        return [
            {
                "original_term": term,
                "entity_group": self._infer_term_type(term),
                "standardized_results": self.search_similar_terms(term),
            }
            for term in potential_terms
        ]

    def search_similar_terms(self, query: str, limit: int = 5) -> List[Dict]:
        """
        Search the collection for financial terms similar to ``query``.

        Args:
            query: Query text.
            limit: Maximum number of results to return.

        Returns:
            List of dicts, each containing:
            - concept_name: standardized term name
            - term_type: term type
            - distance: similarity score reported by Milvus (COSINE metric)
        """
        query_embedding = self.embedding_func.embed_query(query)

        search_result = self.client.search(
            collection_name=self.collection_name,
            data=[query_embedding],
            limit=limit,
            output_fields=[
                "concept_name",
                "term_type",
            ],
        )

        # The unused "synonyms" post-processing was removed: the schema
        # stores no synonyms field and the computed value was never used.
        return [
            {
                "concept_name": hit["entity"].get("concept_name"),
                "term_type": hit["entity"].get("term_type"),
                "distance": float(hit["distance"]),
            }
            for hit in search_result[0]
        ]

    def _extract_terms(self, text: str) -> List[str]:
        """Extract candidate financial terms from ``text``.

        Uses a simple regex over ASCII alphanumerics and CJK characters
        (optionally joined by whitespace or hyphens); a production system
        would likely use proper NLP/NER here instead.
        """
        pattern = r"\b[A-Za-z0-9\u4e00-\u9fa5]+(?:[\s\-][A-Za-z0-9\u4e00-\u9fa5]+)*\b"
        terms = re.findall(pattern, text)

        # Drop terms of length <= 1, then deduplicate (order not guaranteed).
        filtered_terms = [term.strip() for term in terms if len(term.strip()) > 1]
        return list(set(filtered_terms))

    def _infer_term_type(self, term: str) -> str:
        """Infer the term type; currently always returns "FINTERM"."""
        return "FINTERM"

    def __del__(self):
        """Release the Milvus collection on finalization (best effort)."""
        if hasattr(self, "client") and hasattr(self, "collection_name"):
            try:
                self.client.release_collection(self.collection_name)
            except Exception:
                # __init__ may have failed partway through, or the interpreter
                # may be shutting down — never raise from __del__.
                pass
