import yaml
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from typing import Dict, List, Tuple
import re
import os


class FieldSimilarityAnalyzer:
    """Find semantically similar fields across tables described in a YAML schema.

    Every column is flattened into a textual description (name, type, comment,
    key flags, tags), vectorized with TF-IDF, and compared pairwise with
    cosine similarity; pairs above a threshold are reported and exported.
    """

    def __init__(self, yaml_file_path: str):
        """
        Initialize the field similarity analyzer.

        Args:
            yaml_file_path: path to the YAML schema file
        """
        self.yaml_file_path = yaml_file_path
        self.table_data = []          # reserved; not populated by current methods
        self.field_vectors = []       # reserved; not populated by current methods
        self.field_descriptions = []  # filled by vectorize_fields()
        self.vectorizer = TfidfVectorizer(
            stop_words='english',
            ngram_range=(1, 2),
            max_features=1000,
            lowercase=True
        )

    def load_yaml(self) -> List[Dict]:
        """Load and parse the YAML file.

        Returns:
            the parsed list of table dicts, or [] on any load/parse failure.
        """
        try:
            with open(self.yaml_file_path, 'r', encoding='utf-8') as file:
                # safe_load returns None for an empty document; normalize to []
                # so len() below and iteration by callers are always safe.
                data = yaml.safe_load(file) or []
                print(f"成功加载YAML文件，包含 {len(data)} 个表")
                return data
        except (OSError, yaml.YAMLError) as e:
            # Narrowed from bare Exception: file errors and YAML parse errors
            # are the expected failure modes here.
            print(f"加载YAML文件失败: {e}")
            return []

    def build_field_description(self, field: Dict, table_name: str) -> str:
        """
        Build a textual description of a field for vectorization.

        Args:
            field: column info dict (keys used: name, type, comment,
                primary_key, foreign_key, tags, nullable)
            table_name: owning table name (currently unused; kept for
                interface compatibility with existing callers)

        Returns:
            space-joined description string
        """
        description_parts = []

        # Field name and type always contribute (even if empty strings).
        description_parts.append(field.get('name', ''))
        description_parts.append(field.get('type', ''))

        # Comment: split into Chinese runs and the remaining non-Chinese text
        # (pipe separators stripped) so both languages contribute tokens.
        comment = field.get('comment', '')
        if comment:
            chinese_part = re.findall(r'[\u4e00-\u9fff]+', comment)
            english_part = re.sub(r'[\u4e00-\u9fff|]+', '', comment).strip()
            description_parts.extend(chinese_part)
            if english_part:
                description_parts.append(english_part)

        # Primary/foreign key markers as fixed phrases.
        if field.get('primary_key', False):
            description_parts.append('primary key identifier')
        if field.get('foreign_key', False):
            description_parts.append('foreign key reference')

        # Tag information; tolerate an explicit null `tags:` in the YAML.
        tags = field.get('tags') or {}
        for tag_key in ('semantic_role', 'usage_context'):
            tag_value = tags.get(tag_key, '')
            if tag_value:
                description_parts.append(tag_value)

        # Nullability (only NOT NULL adds signal).
        if not field.get('nullable', True):
            description_parts.append('not null required')

        return ' '.join(description_parts)

    def extract_fields_info(self) -> List[Dict]:
        """Flatten every table's columns into a list of per-field info dicts."""
        tables = self.load_yaml()
        fields_info = []

        for table in tables:
            table_name = table.get('table', '')
            table_domain = table.get('domain', '')
            table_desc = table.get('description', '')

            for column in table.get('columns', []):
                field_desc = self.build_field_description(column, table_name)
                # Hoisted: read three times below; `or {}` guards against an
                # explicit null `tags:` entry in the YAML.
                tags = column.get('tags') or {}

                fields_info.append({
                    'table_name': table_name,
                    'table_domain': table_domain,
                    'table_description': table_desc,
                    'field_name': column.get('name', ''),
                    'field_type': column.get('type', ''),
                    'nullable': column.get('nullable', True),
                    'comment': column.get('comment', ''),
                    'primary_key': column.get('primary_key', False),
                    'foreign_key': column.get('foreign_key', False),
                    'semantic_role': tags.get('semantic_role', ''),
                    'usage_context': tags.get('usage_context', ''),
                    'data_sensitivity': tags.get('data_sensitivity', ''),
                    'field_description': field_desc
                })

        print(f"提取了 {len(fields_info)} 个字段信息")
        return fields_info

    def vectorize_fields(self, fields_info: List[Dict]) -> np.ndarray:
        """Vectorize the field descriptions with TF-IDF.

        Returns:
            dense (n_fields, n_features) array, or an empty array on failure
            (e.g. no fields, or an empty vocabulary after stop-word removal).
        """
        descriptions = [field['field_description'] for field in fields_info]
        self.field_descriptions = descriptions

        try:
            vectors = self.vectorizer.fit_transform(descriptions)
            print(f"成功创建向量矩阵，维度: {vectors.shape}")
            # NOTE: densified for downstream convenience; fine for schema-sized
            # inputs, could stay sparse for very large field counts.
            return vectors.toarray()
        except ValueError as e:
            # TfidfVectorizer raises ValueError for empty input / empty vocabulary.
            print(f"向量化失败: {e}")
            return np.array([])

    def find_similar_fields(self, fields_info: List[Dict], similarity_threshold: float = 0.3) -> List[Dict]:
        """
        Find pairs of similar fields across different tables.

        Args:
            fields_info: list of field-info dicts from extract_fields_info()
            similarity_threshold: minimum cosine similarity (inclusive) to report

        Returns:
            list of pair dicts sorted by similarity_score descending
        """
        vectors = self.vectorize_fields(fields_info)
        if vectors.size == 0:
            return []

        # Full pairwise cosine similarity matrix.
        similarity_matrix = cosine_similarity(vectors)

        similar_pairs = []
        n_fields = len(fields_info)

        # Upper triangle only: each unordered pair once, no self-pairs.
        for i in range(n_fields):
            for j in range(i + 1, n_fields):
                # Fields within the same table are not candidates.
                if fields_info[i]['table_name'] == fields_info[j]['table_name']:
                    continue

                # Cast to a native float so the exported value is a plain number
                # rather than np.float64.
                similarity_score = float(similarity_matrix[i, j])

                if similarity_score >= similarity_threshold:
                    similar_pairs.append({
                        'field1_table': fields_info[i]['table_name'],
                        'field1_name': fields_info[i]['field_name'],
                        'field1_type': fields_info[i]['field_type'],
                        'field1_comment': fields_info[i]['comment'],
                        'field1_semantic_role': fields_info[i]['semantic_role'],
                        'field2_table': fields_info[j]['table_name'],
                        'field2_name': fields_info[j]['field_name'],
                        'field2_type': fields_info[j]['field_type'],
                        'field2_comment': fields_info[j]['comment'],
                        'field2_semantic_role': fields_info[j]['semantic_role'],
                        'similarity_score': round(similarity_score, 4)
                    })

        # Most similar pairs first.
        similar_pairs.sort(key=lambda x: x['similarity_score'], reverse=True)
        print(f"找到 {len(similar_pairs)} 对相似字段")

        return similar_pairs

    def export_results(self, similar_pairs: List[Dict], output_file: str = 'similar_fields.csv'):
        """Export the similar pairs to CSV and print summary statistics."""
        if not similar_pairs:
            print("没有找到相似字段，无法导出")
            return

        # utf-8-sig so Excel recognizes the encoding of the Chinese comments.
        df = pd.DataFrame(similar_pairs)
        df.to_csv(output_file, index=False, encoding='utf-8-sig')
        print(f"结果已导出到: {output_file}")

        # Summary statistics.
        print(f"\n=== 相似字段分析统计 ===")
        print(f"总计相似字段对: {len(similar_pairs)}")
        print(f"平均相似度: {df['similarity_score'].mean():.4f}")
        print(f"最高相似度: {df['similarity_score'].max():.4f}")
        print(f"最低相似度: {df['similarity_score'].min():.4f}")

        # Top-10 most similar pairs (list is already sorted descending).
        print(f"\n=== 前10个最相似的字段对 ===")
        for i, pair in enumerate(similar_pairs[:10]):
            print(f"{i + 1}. [{pair['field1_table']}.{pair['field1_name']}] <-> "
                  f"[{pair['field2_table']}.{pair['field2_name']}] "
                  f"(相似度: {pair['similarity_score']})")

    def analyze(self, similarity_threshold: float = 0.3, output_file: str = 'similar_fields.csv'):
        """Run the full pipeline: extract fields, match, export.

        Returns:
            the list of similar pairs, or None if no field info could be extracted.
        """
        print("开始字段相似性分析...")

        # 1. Extract field information from the YAML schema.
        fields_info = self.extract_fields_info()
        if not fields_info:
            print("无法提取字段信息")
            return

        # 2. Find similar field pairs across tables.
        similar_pairs = self.find_similar_fields(fields_info, similarity_threshold)

        # 3. Export results to CSV.
        self.export_results(similar_pairs, output_file)

        return similar_pairs


def main():
    """Entry point: run the field similarity analysis on the sample YAML file."""
    yaml_path = './data/test.yaml'

    # Only proceed when the schema file is actually present.
    if os.path.exists(yaml_path):
        analyzer = FieldSimilarityAnalyzer(yaml_path)
        # Threshold 0.5 means only pairs with >= 50% similarity are reported.
        analyzer.analyze(similarity_threshold=0.5, output_file='similar_fields.csv')
        print("\n分析完成！")
    else:
        print(f"文件 {yaml_path} 不存在，请检查文件路径")


if __name__ == "__main__":
    main()