import os
import pickle
import re
import warnings
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from typing import List, Dict, Any

import faiss
import jellyfish
import numpy as np
import pandas as pd
import yaml
# Optional: enable if using embeddings from OpenAI or another large model
# from openai import OpenAI
# Libraries for vectorization and similarity computation
from sentence_transformers import SentenceTransformer

# Silence all Python warnings to keep console output readable.
warnings.filterwarnings("ignore")


class VectorBasedFieldAnalyzer:
    """
    Field-similarity analyzer based on a vector index and hybrid scoring.

    Combines semantic embedding similarity with a data-type compatibility
    score to provide a more precise, tunable analysis of which database
    fields across tables are likely to represent the same concept.
    """

    def __init__(
            self,
            yaml_file_path: str,
            output_base_dir: str = "./results",
            embedding_model: str = "all-MiniLM-L6-v2",
            vector_cache_dir: str = "./vector_cache",
            use_openai: bool = False,
            openai_api_key: str = None,
    ):
        """
        Initialize the analyzer.

        Args:
            yaml_file_path (str): Path to the YAML file describing the database schema.
            output_base_dir (str): Base directory for result files.
            embedding_model (str): Name of the local SentenceTransformer model.
            vector_cache_dir (str): Directory used to cache embedding vectors.
            use_openai (bool): Whether to use the OpenAI embedding API.
            openai_api_key (str): OpenAI API key (required when use_openai is True).
        """
        self.yaml_file_path = yaml_file_path
        self.output_base_dir = output_base_dir
        self.vector_cache_dir = vector_cache_dir
        self.use_openai = use_openai
        self.tables_data = []
        self.all_fields = []

        # Make sure output and cache directories exist up front.
        Path(self.output_base_dir).mkdir(parents=True, exist_ok=True)
        Path(self.vector_cache_dir).mkdir(parents=True, exist_ok=True)

        # Initialize the embedding backend.
        if use_openai and openai_api_key:
            # self.openai_client = OpenAI(api_key=openai_api_key)
            self.embedding_model = None
            print("正在配置使用OpenAI embedding API...")
        else:
            print(f"正在加载本地embedding模型: {embedding_model}...")
            self.embedding_model = SentenceTransformer(embedding_model)
            print("模型加载完成。")

        # Vector-index state, populated later by the analysis pipeline.
        self.field_embeddings = None
        self.faiss_index = None
        self.embedding_cache_file = os.path.join(
            vector_cache_dir, "field_embeddings.pkl"
        )

    # --- Data loading and preparation ---

    def load_data(self):
        """Load and parse the database schema from the YAML file."""
        print(f"正在从 {self.yaml_file_path} 加载数据...")
        with open(self.yaml_file_path, "r", encoding="utf-8") as file:
            # Guard: safe_load returns None for an empty document.
            self.tables_data = yaml.safe_load(file) or []

        # Flatten every column of every table into a single field list.
        for table in self.tables_data:
            table_name = table.get("table", "unknown_table")
            domain = table.get("domain", "unknown_domain")
            for column in table.get("columns", []):
                # "tags" may be present but null in YAML; coalesce to a dict
                # so the nested .get() cannot crash.
                tags = column.get("tags") or {}
                field_info = {
                    "table": table_name,
                    "domain": domain,
                    "field_name": column.get("name"),
                    "type": column.get("type"),
                    "comment": column.get("comment", ""),
                    "semantic_role": tags.get("semantic_role", ""),
                    "nullable": column.get("nullable", True),
                    "primary_key": column.get("primary_key", False),
                    "foreign_key": column.get("foreign_key", False),
                }
                self.all_fields.append(field_info)

        print(f"数据加载完成：共 {len(self.tables_data)} 张表，{len(self.all_fields)} 个字段。")

    def generate_field_embeddings(self, force_refresh: bool = False) -> np.ndarray:
        """Generate embedding vectors for all fields, or load them from cache.

        The cache is only trusted when its row count matches the current
        number of fields; a stale cache would misalign FAISS results with
        ``self.all_fields`` indices.
        """
        if not force_refresh and os.path.exists(self.embedding_cache_file):
            print("从缓存加载embedding向量...")
            with open(self.embedding_cache_file, "rb") as f:
                cached_data = pickle.load(f)
            cached_embeddings = cached_data['embeddings']
            if len(cached_embeddings) == len(self.all_fields):
                self.field_embeddings = cached_embeddings
                print("向量加载完成。")
                return self.field_embeddings
            # Schema changed since the cache was written: regenerate.
            print("缓存与当前字段数量不一致，重新生成向量...")

        print("正在为所有字段生成embedding向量...")
        embeddings = []
        total_fields = len(self.all_fields)
        for i, field in enumerate(self.all_fields):
            if (i + 1) % 1000 == 0 or i == total_fields - 1:
                print(f"处理进度: {i + 1}/{total_fields}")

            description = self._create_field_description(field)

            if self.use_openai:
                embedding = self._get_openai_embedding(description)
            else:
                embedding = self._get_local_embedding(description)
            embeddings.append(embedding)

        # float32 is what FAISS expects.
        self.field_embeddings = np.vstack(embeddings).astype('float32')

        # Persist the result for subsequent runs.
        print("正在缓存生成的向量...")
        cache_data = {'embeddings': self.field_embeddings}
        with open(self.embedding_cache_file, "wb") as f:
            pickle.dump(cache_data, f)

        print(f"Embedding向量生成并缓存完成，维度: {self.field_embeddings.shape}")
        return self.field_embeddings

    def build_vector_index(self):
        """Build an efficient FAISS index over all field vectors.

        Raises:
            ValueError: If embeddings have not been generated/loaded yet.
        """
        if self.field_embeddings is None:
            raise ValueError("在构建索引前，必须先生成或加载Embeddings。")

        print("正在构建FAISS向量索引...")
        dimension = self.field_embeddings.shape[1]

        # Key step: L2-normalize all vectors in place before adding them.
        # This is the precondition for later converting L2 distances to
        # cosine similarities via 1 - d/2.
        faiss.normalize_L2(self.field_embeddings)

        # HNSW index: fast approximate nearest-neighbor search for
        # high-dimensional vectors.
        self.faiss_index = faiss.IndexHNSWFlat(dimension, 32)
        self.faiss_index.hnsw.efConstruction = 200
        self.faiss_index.hnsw.efSearch = 100

        # Add the normalized vectors to the index.
        self.faiss_index.add(self.field_embeddings)

        print(f"向量索引构建完成，共包含 {self.faiss_index.ntotal} 个向量。")

    # --- Core analysis logic ---

    def run_vector_analysis(
            self,
            threshold: float = 0.6,
            max_workers: int = 4,
            force_refresh_embeddings: bool = False,
            type_weight: float = 0.4
    ):
        """
        Run the full pipeline: load data -> generate embeddings -> build
        index -> analyze domains in parallel -> print reports.

        Args:
            threshold: Minimum vector similarity for a candidate pair.
            max_workers: Thread-pool size for per-domain processing.
            force_refresh_embeddings: Regenerate embeddings even if cached.
            type_weight: Weight of type similarity in the overall score
                (vector similarity gets 1 - type_weight).

        Returns:
            Dict mapping each domain name to its summary dict.
        """
        # 1. Load schema data.
        self.load_data()

        # 2. Generate or load embeddings.
        self.generate_field_embeddings(force_refresh_embeddings)

        # 3. Build the vector index.
        self.build_vector_index()

        # 4. Group fields by domain for parallel processing.
        domain_fields = self._get_fields_by_domain()
        print(f"发现 {len(domain_fields)} 个业务域，准备开始并行分析...")

        results = {}
        # 5. Process each domain on a thread pool (FAISS search releases
        #    work per domain; results are collected as they complete).
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_domain = {
                executor.submit(self._process_domain_vector, domain, fields, threshold, type_weight): domain
                for domain, fields in domain_fields.items()
            }

            for future in as_completed(future_to_domain):
                domain = future_to_domain[future]
                try:
                    domain_summary = future.result()
                    results[domain] = domain_summary
                    self._print_domain_summary(domain_summary)
                except Exception as exc:
                    print(f'领域 {domain} 处理时发生严重异常: {exc}')

        # 6. Print the overall summary.
        self._print_overall_summary(results)
        return results

    def _process_domain_vector(
            self,
            domain: str,
            domain_fields: List[Dict],
            threshold: float,
            type_weight: float
    ) -> Dict[str, Any]:
        """Process one domain: rerank neighbor pairs by a weighted blend of
        vector similarity and type/name similarity, then export to CSV.
        """
        print(f"开始处理领域: {domain} (类型权重: {type_weight})")
        vector_weight = 1.0 - type_weight
        # Map global field index -> field dict, restricted to this domain.
        domain_indices = {i: field for i, field in enumerate(self.all_fields) if field['domain'] == domain}

        similar_pairs = []
        for i, field in domain_indices.items():
            query_vector = self.field_embeddings[i].reshape(1, -1)
            distances, indices = self.faiss_index.search(query_vector, 100)

            for dist, idx in zip(distances[0], indices[0]):
                # Keep only idx > i: dedupes (a,b)/(b,a) pairs, skips the
                # query itself, and drops FAISS's -1 padding entries.
                if idx <= i: continue

                # For L2-normalized vectors the (squared) L2 distance d
                # satisfies cos = 1 - d/2.
                vector_similarity = 1 - (dist / 2)
                if vector_similarity < threshold: continue

                similar_field = self.all_fields[int(idx)]
                # Skip same-table pairs, cross-domain pairs, and pairs whose
                # names are identical ignoring case. (None-safe: a column may
                # have no name in the YAML.)
                name_a = (field['field_name'] or '').lower()
                name_b = (similar_field['field_name'] or '').lower()
                if (field['table'] == similar_field['table'] or
                        field['domain'] != similar_field['domain'] or
                        name_a == name_b):
                    continue

                # Blend vector similarity with the advanced type/name score.
                type_similarity = self._calculate_advanced_type_similarity(field, similar_field)
                overall_similarity = (vector_weight * vector_similarity) + (type_weight * type_similarity)

                similar_pairs.append({
                    'field1': field, 'field2': similar_field,
                    'similarities': {
                        'vector_similarity': round(float(vector_similarity), 4),
                        'type_similarity': round(float(type_similarity), 4),
                        'overall_similarity': round(float(overall_similarity), 4),
                    },
                })

        similar_pairs.sort(key=lambda x: x['similarities']['overall_similarity'], reverse=True)
        exported_files = self._export_results_to_csv(similar_pairs, domain)

        return {
            'domain': domain, 'total_fields': len(domain_fields),
            'similar_pairs_count': len(similar_pairs),
            'high_similarity': len([p for p in similar_pairs if p['similarities']['overall_similarity'] >= 0.8]),
            'medium_similarity': len(
                [p for p in similar_pairs if 0.6 <= p['similarities']['overall_similarity'] < 0.8]),
            'low_similarity': len([p for p in similar_pairs if p['similarities']['overall_similarity'] < 0.6]),
            'exported_files': exported_files,
        }

    # --- Helpers and utilities ---

    def _calculate_advanced_type_similarity(self, field1: dict, field2: dict) -> float:
        """Score type compatibility (0.0-1.0) using both type and name.

        Rules:
          * both string-like with strict VARCHAR(<255) lengths: 1.0 if equal
            lengths, else 0.1; other string/string combinations: 0.9
          * both numeric: 0.95
          * string vs numeric with highly similar names: 0.8
          * everything else: 0.0
        """
        type1 = (field1.get('type') or "").upper()
        type2 = (field2.get('type') or "").upper()
        name1 = field1.get('field_name') or ""
        name2 = field2.get('field_name') or ""

        def parse_varchar(type_str):
            # Extract N from VARCHAR(N); None when not a sized VARCHAR.
            match = re.match(r'VARCHAR\((\d+)\)', type_str)
            return int(match.group(1)) if match else None

        def base_type(type_str):
            # Leading alphabetic run, e.g. "DECIMAL(10,2)" -> "DECIMAL".
            match = re.match(r'^[A-Z]+', type_str)
            return match.group(0) if match else type_str

        len1, len2 = parse_varchar(type1), parse_varchar(type2)
        base_type1, base_type2 = base_type(type1), base_type(type2)

        numeric_family = ['INT', 'BIGINT', 'DECIMAL', 'NUMERIC', 'FLOAT', 'DOUBLE', 'SMALLINT', 'TINYINT']
        string_family = ['VARCHAR', 'CHAR', 'TEXT', 'STRING']

        # Rule 1: both in the string family.
        if base_type1 in string_family and base_type2 in string_family:
            # Strict VARCHAR length match (lengths below 255 only).
            if len1 is not None and len1 < 255 and len2 is not None and len2 < 255:
                return 1.0 if len1 == len2 else 0.1
            return 0.9  # e.g. TEXT vs VARCHAR, or involving VARCHAR(255)

        # Rule 2: both in the numeric family.
        if base_type1 in numeric_family and base_type2 in numeric_family:
            return 0.95

        # Rule 3: cross-family (string vs numeric) but names may match,
        # suggesting the same concept with different implementations.
        is_str_num_pair = (base_type1 in string_family and base_type2 in numeric_family) or \
                          (base_type1 in numeric_family and base_type2 in string_family)
        if is_str_num_pair:
            name_similarity = self._get_name_similarity(name1, name2)
            if name_similarity > 0.92:  # high bar for name-only evidence
                return 0.8

        return 0.0  # all other combinations

    def _get_name_similarity(self, name1: str, name2: str) -> float:
        """Compute field-name similarity with the Jaro-Winkler algorithm."""
        if not name1 or not name2: return 0.0
        return jellyfish.jaro_winkler_similarity(name1.lower(), name2.lower())

    def _get_fields_by_domain(self) -> Dict[str, List[Dict]]:
        """Group all fields by their business domain."""
        domain_fields = defaultdict(list)
        for field in self.all_fields:
            domain_fields[field["domain"]].append(field)
        return dict(domain_fields)

    def _get_type_similarity(self, type1: str, type2: str) -> float:
        """Score similarity of two raw data types (0.0-1.0).

        Identical base types score 1.0; same type family scores 0.8;
        anything else scores 0.0.
        """
        type1 = type1.upper() if type1 else ""
        type2 = type2.upper() if type2 else ""

        def base_type(type_str):
            # Leading alphabetic run, e.g. "VARCHAR(64)" -> "VARCHAR".
            match = re.match(r'^[A-Z]+', type_str)
            return match.group(0) if match else type_str

        base_type1, base_type2 = base_type(type1), base_type(type2)

        if base_type1 == base_type2:
            return 1.0

        numeric_family = ['INT', 'BIGINT', 'DECIMAL', 'NUMERIC', 'FLOAT', 'DOUBLE', 'SMALLINT', 'TINYINT']
        string_family = ['VARCHAR', 'CHAR', 'TEXT', 'STRING']
        date_family = ['DATE', 'TIMESTAMP', 'TIME', 'DATETIME']

        families = [numeric_family, string_family, date_family]
        for family in families:
            if base_type1 in family and base_type2 in family:
                return 0.8  # same family, different type

        return 0.0  # unrelated families

    def _create_field_description(self, field: Dict) -> str:
        """Build the composite text description used to embed a field."""
        parts = []
        parts.append(f"字段名: {self._make_field_name_readable(field['field_name'])}")
        parts.append(f"数据类型: {self._get_type_semantic_description(field['type'])}")
        if field['comment']: parts.append(f"注释: {field['comment']}")
        if field['semantic_role']: parts.append(f"语义角色: {field['semantic_role']}")

        constraints = []
        if field['primary_key']: constraints.append("主键")
        if field['foreign_key']: constraints.append("外键")
        if not field['nullable']: constraints.append("非空")
        if constraints: parts.append(f"约束: {', '.join(constraints)}")

        parts.append(f"业务域: {field['domain']}")
        parts.append(f"所属表: {field['table']}")
        return " | ".join(parts)

    def _make_field_name_readable(self, field_name: str) -> str:
        """Turn an underscore/hyphen-separated field name into a readable phrase,
        expanding common abbreviations along the way."""
        if not field_name: return ""
        parts = re.split(r'[_\-]', field_name.lower())
        abbreviation_map = {
            'id': 'identifier', 'no': 'number', 'nm': 'name', 'dt': 'date', 'tm': 'time',
            'amt': 'amount', 'qty': 'quantity', 'desc': 'description', 'addr': 'address',
            'tel': 'telephone', 'acct': 'account', 'cust': 'customer', 'prod': 'product',
            'ord': 'order', 'seq': 'sequence', 'ref': 'reference', 'stat': 'status',
            'cfg': 'configuration', 'param': 'parameter', 'temp': 'temporary', 'max': 'maximum',
            'min': 'minimum', 'cnt': 'count', 'src': 'source', 'dest': 'destination',
            'orig': 'original', 'curr': 'current', 'prev': 'previous'
        }
        return " ".join([abbreviation_map.get(part, part) for part in parts])

    def _get_type_semantic_description(self, data_type: str) -> str:
        """Translate a technical data type into a business-semantic description."""
        if not data_type: return "未知类型"
        dt = data_type.upper()
        if any(s in dt for s in ['CHAR', 'TEXT', 'STRING']): return "文本字符串类型，用于存储名称、描述等文字信息"
        if any(s in dt for s in ['INT', 'BIGINT']): return "整数类型，用于存储ID、计数、序号等数值"
        if any(s in dt for s in ['DECIMAL', 'NUMERIC', 'FLOAT', 'DOUBLE']): return "数值类型，用于存储金额、比率等精确数值"
        if any(s in dt for s in ['DATE', 'TIMESTAMP', 'TIME']): return "时间类型，用于存储时间戳、操作时间等信息"
        if any(s in dt for s in ['BOOLEAN', 'BIT']): return "布尔类型，用于存储是否、状态标志等二元信息"
        return f"数据类型 {data_type}"

    def _get_local_embedding(self, text: str) -> np.ndarray:
        """Embed text with the local model; fall back to a zero vector on error."""
        try:
            return self.embedding_model.encode(text, convert_to_numpy=True)
        except Exception as e:
            print(f"本地embedding获取失败: {e}")
            return np.zeros(self.embedding_model.get_sentence_embedding_dimension())

    def _get_openai_embedding(self, text: str) -> np.ndarray:
        """Embed text via the OpenAI API (not yet wired up); zero vector on error."""
        try:
            # response = self.openai_client.embeddings.create(
            #     input=text,
            #     model="text-embedding-3-small" # or another model
            # )
            # return np.array(response.data[0].embedding)
            raise NotImplementedError("OpenAI客户端未完全实现，请取消注释并配置。")
        except Exception as e:
            print(f"OpenAI embedding获取失败: {e}")
            return np.zeros(1536)

    def _export_results_to_csv(
            self, similar_pairs: List[Dict], domain: str, batch_size: int = 5000
    ) -> List[str]:
        """Export similar field pairs to CSV files in batches.

        Returns:
            List of file paths written (empty when there are no pairs).
        """
        if not similar_pairs:
            return []

        domain_dir = os.path.join(self.output_base_dir, domain)
        Path(domain_dir).mkdir(parents=True, exist_ok=True)

        exported_files = []
        for batch_num, i in enumerate(range(0, len(similar_pairs), batch_size), 1):
            batch_pairs = similar_pairs[i:i + batch_size]

            results_data = []
            for pair in batch_pairs:
                row = {
                    'Table1': pair['field1']['table'], 'Domain1': pair['field1']['domain'],
                    'Field1': pair['field1']['field_name'], 'Type1': pair['field1']['type'],
                    'Comment1': pair['field1']['comment'], 'SemanticRole1': pair['field1']['semantic_role'],
                    'Table2': pair['field2']['table'], 'Domain2': pair['field2']['domain'],
                    'Field2': pair['field2']['field_name'], 'Type2': pair['field2']['type'],
                    'Comment2': pair['field2']['comment'], 'SemanticRole2': pair['field2']['semantic_role'],
                    'OverallSimilarity': pair['similarities']['overall_similarity'],
                    'VectorSimilarity': pair['similarities']['vector_similarity'],
                    'TypeSimilarity': pair['similarities']['type_similarity'],
                }
                results_data.append(row)

            # Pairs with identical names are filtered out upstream, hence
            # the "noSameField" subdirectory name.
            no_same_field_dir = os.path.join(domain_dir, "noSameField")
            Path(no_same_field_dir).mkdir(parents=True, exist_ok=True)
            output_file = os.path.join(no_same_field_dir, f'similarity_results_{domain}_{batch_num:03d}.csv')
            df = pd.DataFrame(results_data)
            # utf-8-sig so Excel renders the Chinese comments correctly.
            df.to_csv(output_file, index=False, encoding='utf-8-sig')

            exported_files.append(output_file)
            print(f"成功导出批次 {batch_num} 到: {output_file} ({len(batch_pairs)} 条记录)")

        return exported_files

    def _print_domain_summary(self, summary: Dict[str, Any]):
        """Print the analysis summary for a single domain."""
        print(f"\n--- {summary['domain']} 领域分析摘要 ---")
        print(f"  - 总字段数: {summary['total_fields']}")
        print(f"  - 发现相似字段对: {summary['similar_pairs_count']}")
        print(f"  - 高相似度 (≥0.8): {summary['high_similarity']} 对")
        print(f"  - 中等相似度 (0.6-0.8): {summary['medium_similarity']} 对")
        # low_similarity was computed but previously never reported.
        print(f"  - 低相似度 (<0.6): {summary['low_similarity']} 对")
        print(f"  - 导出文件数: {len(summary['exported_files'])}")

    def _print_overall_summary(self, results: Dict[str, Any]):
        """Print the overall summary after all domains have completed."""
        print("\n" + "=" * 50)
        print("=== 总体分析摘要 ===")
        print("=" * 50)
        total_fields = sum(r['total_fields'] for r in results.values())
        total_pairs = sum(r['similar_pairs_count'] for r in results.values())

        print(f"处理领域总数: {len(results)}")
        print(f"处理字段总数: {total_fields}")
        print(f"发现相似字段对总数: {total_pairs}")

        print("\n--- 各领域详情 ---")
        for domain, summary in results.items():
            print(f"  - {domain}: {summary['similar_pairs_count']} 对, "
                  f"导出至 {len(summary['exported_files'])} 个文件。")
        print("=" * 50)

    def semantic_search(self, query: str, top_k: int = 10) -> List[Dict]:
        """
        Find the database fields most relevant to a natural-language query.

        Returns a list of {'field', 'similarity', 'description'} dicts,
        empty if the index has not been built yet.
        """
        if self.faiss_index is None:
            print("错误：必须先运行分析 (run_vector_analysis) 来构建索引。")
            return []

        print(f"\n正在执行语义搜索: '{query}'...")

        # Embed the query with the SAME backend that produced the indexed
        # vectors, then normalize so distances map to cosine similarity.
        if self.use_openai:
            raw_vector = self._get_openai_embedding(query)
        else:
            raw_vector = self._get_local_embedding(query)
        query_vector = raw_vector.reshape(1, -1).astype('float32')
        faiss.normalize_L2(query_vector)

        # Search for the nearest neighbors.
        distances, indices = self.faiss_index.search(query_vector, top_k)

        results = []
        for dist, idx in zip(distances[0], indices[0]):
            if idx < 0:
                continue  # FAISS pads with -1 when fewer than top_k hits exist
            field = self.all_fields[int(idx)]
            # Convert distance to cosine similarity (vectors are normalized).
            similarity = 1 - (dist / 2)
            result = {
                'field': field,
                'similarity': round(float(similarity), 4),
                'description': self._create_field_description(field)
            }
            results.append(result)

        return results


# --- Script entry point ---
if __name__ == "__main__":
    # 1. Build the analyzer.
    # Note: point yaml_file_path at your own schema data file.
    field_analyzer = VectorBasedFieldAnalyzer(
        yaml_file_path="./data/result_ens.yaml",
        output_base_dir="./data/vector_results",
        embedding_model="all-MiniLM-L6-v2",
        use_openai=False,
    )

    # 2. Run the full analysis pipeline. Key knobs:
    #    - threshold: initial vector-similarity filter
    #    - max_workers: number of analysis threads
    #    - force_refresh_embeddings: regenerate vectors even when cached
    #    - type_weight: share of the type score in the blended similarity
    #      (0.0 = ignore types, 0.4 = 40% types, 1.0 = types only — not recommended)
    analysis_results = field_analyzer.run_vector_analysis(
        threshold=0.6,
        max_workers=8,
        force_refresh_embeddings=False,
        type_weight=0.2,
    )

    # 3. (Optional) demonstrate natural-language field lookup.
    print("\n--- 语义搜索示例 ---")
    hits = field_analyzer.semantic_search("客户的唯一身份标识", top_k=5)
    for rank, hit in enumerate(hits, 1):
        matched = hit['field']
        print(f"{rank}. 字段: {matched['table']}.{matched['field_name']}")
        print(f"   相似度: {hit['similarity']}")
        print(f"   描述: {hit['description']}\n")

    print("=== 所有处理任务完成 ===")
