"""
分析字段共现情况
基于 trace 数据，统计字段值的共现模式。
然后根据字段的权重，得出字段的关联矩阵。
"""

import math
from collections import defaultdict
from typing import Dict, List


class FieldCooccurrenceAnalyzer:
    """Analyze field co-occurrence across trace data.

    Builds value co-occurrence statistics over operation fields, computes
    per-field information weights (IDF / entropy / BM25), and combines the
    two into operation-level field correlation matrices.
    """

    def __init__(self, weight_method: str = 'entropy'):
        """
        Initialize the analyzer.

        Args:
            weight_method: how field weights are computed; one of
                'idf', 'entropy', 'bm25'.
        """
        self.weight_method = weight_method

        # Forward value index: {process: {operation: {field: [values]}}}
        self.field_value_index = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))

        # Inverted value index: {value: set(Field)}
        self.value_field_index = defaultdict(set)

        # Operation-level dependencies come from an LLM and feed the
        # co-occurrence analysis step.
        # Operation dependency graph: {process: [(src_op, tgt_op)]}
        # (original factory was `list[tuple]`; calling it is identical to `list`)
        self.operation_dependencies = defaultdict(list)
        # Operation data-dependency chains: {process: [op]}
        self.data_dependencies = defaultdict(list)

        # Field information weights: {process: {operation: {field: weight}}}
        self.field_weights = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))

        # Field dependency data is provided by an LLM; currently unused.
        self.field_dependencies = None

        # Alternative information measures (field_priority).
        self.field_idf = {}  # per-field IDF weight
        # self.field_entropy = {}  # per-field Shannon entropy
        # Entropy now writes directly into self.field_weights; keep these
        # side channels until the comparison experiments are done.
        self.field_bm25 = {}  # per-field BM25 weight

    def load_field_dependencies(self):
        """Load the checkout service's field dependencies.

        Each tuple is (producer_op, consumer_op, produced_field,
        consumed_field), i.e. a produce/consume relation between fields.
        The `user_currency -> to_code` pair appears twice, presumably
        because CurrencyService.Convert is invoked for both the product
        price and the shipping cost.
        """
        self.field_dependencies = [
            ("PlaceOrderRequest", "CartService.GetCart", "user_id", "user_id"),
            ("ProductCatalogService.GetProduct", "CurrencyService.Convert", "price_usd", "from"),
            ("PlaceOrderRequest", "CurrencyService.Convert", "user_currency", "to_code"),
            ("PlaceOrderRequest", "ShippingService.GetQuote", "address", "address"),
            ("ShippingService.GetQuote", "CurrencyService.Convert", "cost_usd", "from"),
            ("PlaceOrderRequest", "CurrencyService.Convert", "user_currency", "to_code"),
            ("PlaceOrderRequest", "PaymentService.Charge", "credit_card", "credit_card"),
            ("PlaceOrderRequest", "ShippingService.ShipOrder", "address", "address"),
            ("PlaceOrderRequest", "CartService.EmptyCart", "user_id", "user_id"),
            ("PlaceOrderRequest", "EmailService.SendOrderConfirmation", "email", "email"),
        ]

    def load_operation_dependencies(self):
        """
        Operation dependencies of the otel-demo application, as (src_op, tgt_op) tuples.
        TODO: support loading from a file (Python list, an open DAG format,
        or a custom JSON format).
        """
        checkout_operation_dependencies = [
            ("oteldemo.CheckoutService/PlaceOrder", "checkout.getUserCart"),  # getUserCart depends directly on placeOrder.
            ("oteldemo.CheckoutService/PlaceOrder", "checkout.quoteShipping"),  # quoteShipping depends directly on placeOrder.
            ("oteldemo.CheckoutService/PlaceOrder", "checkout.convertCurrency"),  # convertCurrency depends directly on placeOrder.

            # Data dependency between downstream operations: quoteShipping
            # passes shippingUSD to convertCurrency.
            # ("checkout.quoteShipping", "checkout.convertCurrency"),

            ("oteldemo.CheckoutService/PlaceOrder", "checkout.chargeCard"),  # chargeCard depends directly on placeOrder.

            ("oteldemo.CheckoutService/PlaceOrder", "checkout.shipOrder"),  # shipOrder depends directly on placeOrder.

            ("oteldemo.CheckoutService/PlaceOrder", "checkout.emptyUserCart"),  # emptyUserCart depends directly on placeOrder.

            ("oteldemo.CheckoutService/PlaceOrder", "checkout.sendOrderConfirmation"),  # depends directly on placeOrder.

            ("oteldemo.CheckoutService/PlaceOrder", "orders publish"),  # from Kafka; depends directly on placeOrder.
        ]

        frontend_operation_dependencies = [
            # Useful operation dependencies.
            ("GET /api/recommendations", "gRPC RecommendationService/listRecommendations"),
            ("GET /api/recommendations", "gRPC ProductCatalogService/getProduct"),
            ("GET /api/recommendations", "gRPC CurrencyService/convert"),

            ("GET /api/cart", "gRPC CartService/getCart"),

            ("GET /api/data", "gRPC AdService/getAds"),  # "data" here means ads.

            ("GET /api/products/{productId}", "gRPC ProductCatalogService/getProduct"),  # single product

            # Operation dependencies without field dependencies.
            ("GET /api/products", "gRPC ProductCatalogService/listProducts"),  # no field dependency: the operation takes no parameters, content is empty.
            ("GET /api/currency", "gRPC CurrencyService/getSupportedCurrencies"),  # operation takes no request parameters

            # ("GET", ""), # GET liveness probe, ignored.
        ]

        recommendation_operation_dependencies = [
            ("/oteldemo.RecommendationService/ListRecommendations", "/oteldemo.ProductCatalogService/ListProducts")
            # recommendation list
        ]

        cart_operation_dependencies = [
            ("POST /oteldemo.CartService/GetCart", "HGET"),  # read the cart
            ("POST /oteldemo.CartService/EmptyCart", "HMSET"),  # clear the cart
            ("POST /oteldemo.CartService/EmptyCart", "EXPIRE"),
        ]

        shipping_operation_dependencies = [
            ("shipping.getQuote", "shipping.createQuote"),  # a quote is created on lookup
        ]

        self.operation_dependencies = {
            "checkout": checkout_operation_dependencies,
            "frontend": frontend_operation_dependencies,
            "recommendation": recommendation_operation_dependencies,
            "cart": cart_operation_dependencies,
            "shipping": shipping_operation_dependencies}

    def load_data_dependencies(self):
        """Data-dependency chains between operations, ordered from out_op to in_op."""
        self.data_dependencies = {
            "checkout": ["checkout.convertCurrency", "checkout.quoteShipping", "oteldemo.CheckoutService/PlaceOrder"]
        }

    def build_cooccurrence_matrix(self) -> Dict:
        """
        Build the field-value co-occurrence matrix.

        For every value in the inverted index, each ordered pair of fields
        it appears in (including a field paired with itself) co-occurs
        once; diagonal entries therefore count how many distinct values a
        field appears with.

        Returns:
            Dict: nested mapping {field1: {field2: count}}.
        """
        # Collect every field key (operation.field) seen in the inverted index.
        all_fields = set()
        for fields in self.value_field_index.values():
            all_fields.update(fields)

        all_fields = sorted(all_fields)

        # Dense, zero-initialized matrix over the sorted field list.
        cooccurrence = {field1: {field2: 0 for field2 in all_fields}
                        for field1 in all_fields}

        # Accumulate co-occurrence counts per shared value.
        for fields in self.value_field_index.values():
            fields_list = list(fields)
            for field1 in fields_list:
                for field2 in fields_list:
                    cooccurrence[field1][field2] += 1

        return cooccurrence

    # FIXME: rewrite
    def calculate_idf_weights(self, normalize: bool = True):
        """
        Compute per-field IDF (inverse document frequency) weights.

        Each distinct value is treated as a "document"; a field is a "term"
        occurring in it. IDF = log((total_values + 1) / (doc_count + 1)).

        Args:
            normalize: whether to L2-normalize the IDF weights.
        """
        # Count how many distinct values each field (operation.field) occurs in.
        field_document_count = defaultdict(int)
        total_values = len(self.value_field_index)

        for fields in self.value_field_index.values():
            for field in fields:
                field_document_count[field] += 1

        # Compute IDF; the +1 terms avoid division by zero (natural log).
        for field, doc_count in field_document_count.items():
            if doc_count > 0:  # defensive: counted fields always have doc_count >= 1
                self.field_idf[field] = math.log((total_values + 1) / (doc_count + 1))
            else:
                self.field_idf[field] = 0.0

        # L2-normalize the IDF vector.
        if normalize and self.field_idf:
            l2_norm = math.sqrt(sum(v ** 2 for v in self.field_idf.values()))
            if l2_norm > 0:
                for field in self.field_idf:
                    self.field_idf[field] /= l2_norm

        # Organize field weights by operation (only when the idf method is active).
        # NOTE(review): this writes field_weights[op][field] — two levels —
        # while the declared shape is {process: {operation: {field: weight}}}
        # (three levels, as written by calculate_entropy_weights and read by
        # build_op_correlation_matrices). The field key carries no process
        # component, so a correct mapping cannot be derived here; this is
        # likely the reason for the FIXME above — confirm before relying on
        # the idf path downstream.
        if self.weight_method == 'idf':
            for field, idf in self.field_idf.items():
                parts = field.split('.')
                if len(parts) >= 2:
                    op = '.'.join(parts[:-1])
                    self.field_weights[op][field] = idf

    def calculate_entropy_weights(self):
        """
        Compute per-field Shannon entropy and store it, max-normalized per
        operation, into self.field_weights.

        Entropy = -Σ p(x) * log(p(x)), where p(x) is the empirical
        frequency of value x among all observed values of the field.
        """

        for process, operation_field_values in self.field_value_index.items():
            for operation, field_values in operation_field_values.items():
                max_entropy = 0.0
                field_entropy = {}
                for field, values in field_values.items():
                    values = list(values)
                    if not values:  # empty value list -> zero entropy
                        field_entropy[field] = 0.0
                        continue

                    # Tally value frequencies in one pass (avoids the
                    # O(unique * n) cost of calling values.count per
                    # unique value).
                    counts = defaultdict(int)
                    for value in values:
                        counts[value] += 1

                    total = len(values)
                    entropy = 0.0
                    for count in counts.values():
                        frequency = count / total  # always > 0, log is safe
                        entropy += -frequency * math.log(frequency)
                    field_entropy[field] = entropy
                    max_entropy = max(max_entropy, entropy)

                # Normalize by the operation's maximum entropy (fall back to
                # 1.0 when all entropies are zero, to avoid division by zero).
                if not max_entropy:
                    max_entropy = 1.0
                for field in field_values:
                    self.field_weights[process][operation][field] = field_entropy[field] / max_entropy

    # FIXME: rewrite
    def calculate_bm25_weights(self, k1: float = 1.5, b: float = 0.75):
        """
        Compute BM25 weights (an improved TF-IDF).

        Each distinct value is treated as a "document" whose length is the
        number of fields it appears in.

        Args:
            k1: term-frequency saturation parameter (typically 1.2-2.0).
            b: length-normalization parameter (typically 0.75).
        """
        # Field frequencies and document lengths.
        field_tf = defaultdict(int)  # field term frequency
        field_df = defaultdict(int)  # field document frequency
        doc_lengths = []

        for fields in self.value_field_index.values():
            doc_lengths.append(len(fields))
            for field in fields:
                field_tf[field] += 1
                field_df[field] += 1

        avg_doc_length = sum(doc_lengths) / len(doc_lengths) if doc_lengths else 1.0
        total_docs = len(self.value_field_index)

        # BM25 score per field.
        for field in field_df:
            # IDF component (with the +1 smoothing that keeps it positive).
            idf = math.log((total_docs - field_df[field] + 0.5) / (field_df[field] + 0.5) + 1.0)

            # TF component (simplified: assumes each field occurs at most
            # once per document, hence the fixed 1.0 / avg_doc_length ratio).
            tf = field_tf[field]
            norm_tf = (tf * (k1 + 1)) / (tf + k1 * (1 - b + b * (1.0 / avg_doc_length)))

            self.field_bm25[field] = idf * norm_tf

        # L2-normalize the BM25 vector.
        bm25_values = list(self.field_bm25.values())
        l2_norm = math.sqrt(sum(v ** 2 for v in bm25_values))
        if l2_norm > 0:
            for field in self.field_bm25:
                self.field_bm25[field] /= l2_norm

    def create_diagonal_matrix(self, weights: Dict[str, float], fields: List[str]) -> List[List[float]]:
        """
        Create a diagonal weight matrix.

        Args:
            weights: field -> weight mapping.
            fields: ordered field list; fields missing from `weights`
                default to 1.0.

        Returns:
            A len(fields) × len(fields) diagonal matrix.
        """
        n = len(fields)
        matrix = [[0.0 for _ in range(n)] for _ in range(n)]

        for i, field in enumerate(fields):
            matrix[i][i] = weights.get(field, 1.0)

        return matrix

    def matrix_multiply(self, A: List[List[float]], B: List[List[float]]) -> List[List[float]]:
        """
        Matrix product A × B.

        Args:
            A: left matrix.
            B: right matrix.

        Returns:
            The product matrix.

        Raises:
            ValueError: if the inner dimensions do not match.
        """
        rows_A = len(A)
        cols_A = len(A[0]) if A else 0
        rows_B = len(B)
        cols_B = len(B[0]) if B else 0

        if cols_A != rows_B:
            raise ValueError(f"矩阵维度不匹配: ({rows_A}x{cols_A}) × ({rows_B}x{cols_B})")

        result = [[0.0 for _ in range(cols_B)] for _ in range(rows_A)]

        for i in range(rows_A):
            for j in range(cols_B):
                for k in range(cols_A):
                    result[i][j] += A[i][k] * B[k][j]

        return result

    def matrix_is_zero(self, A: List[List[float]]) -> bool:
        """
        Check whether a matrix is the zero matrix.

        Args:
            A: input matrix.

        Returns:
            bool: True if every element is 0, otherwise False.
        """
        return all(element == 0.0 for row in A for element in row)

    def matrix_normalize(self, A: List[List[float]]):
        """
        Row-normalize a matrix in place (each row sums to 1; all-zero rows
        are left untouched).

        Args:
            A: input matrix, modified in place.
        """
        cols = len(A[0]) if A else 0

        for row in A:
            row_sum = sum(row)
            if row_sum != 0:
                for j in range(cols):
                    row[j] /= row_sum

    def build_op_correlation_matrices(self) -> Dict:
        """
        Build operation-level correlation matrices using the formula
        W_src × cooccurrence × W_tgt.

        Returns:
            Dict: {process: {(src_op, tgt_op): result dict}} where the
            result dict carries the field lists, weights, the normalized
            value co-occurrence matrix, and the correlation matrix.
        """
        correlation_matrices = defaultdict(dict)

        # Precompute, once, the set of field names each value appears in;
        # the original code rescanned the whole inverted index for every
        # (src_field, tgt_field) pair.
        # NOTE(review): elements of value_field_index sets are assumed to
        # expose a .field attribute here, whereas build_cooccurrence_matrix
        # treats them as plain field names — confirm the element type.
        value_field_sets = [{f.field for f in fields}
                            for fields in self.value_field_index.values()]

        # Iterate operation pairs from the dependency graph, per process.
        for process, op_pairs in self.operation_dependencies.items():
            operation_field_weights = self.field_weights[process]
            for src_op, tgt_op in op_pairs:
                src_field_weights = operation_field_weights[src_op]
                tgt_field_weights = operation_field_weights[tgt_op]
                src_fields = list(src_field_weights.keys())
                tgt_fields = list(tgt_field_weights.keys())

                if not src_fields or not tgt_fields:
                    continue  # no observed fields on one side; skip the pair

                # Value co-occurrence matrix, len(src_fields) × len(tgt_fields):
                # entry (i, j) counts values shared by src_fields[i] and
                # tgt_fields[j].
                value_cooccurrence = [
                    [float(sum(1 for field_set in value_field_sets
                               if src_field in field_set and tgt_field in field_set))
                     for tgt_field in tgt_fields]
                    for src_field in src_fields
                ]
                self.matrix_normalize(value_cooccurrence)  # row-normalize co-occurrence

                # Diagonal weight matrices for each side.
                src_weight_matrix = self.create_diagonal_matrix(src_field_weights, src_fields)
                tgt_weight_matrix = self.create_diagonal_matrix(tgt_field_weights, tgt_fields)

                # Correlation matrix: W_src × V × W_tgt.
                temp = self.matrix_multiply(src_weight_matrix, value_cooccurrence)
                correlation = self.matrix_multiply(temp, tgt_weight_matrix)

                # Warn on an all-zero correlation matrix (the original
                # comment said "raise", but the code only prints).
                if self.matrix_is_zero(correlation):
                    print(f"操作关联矩阵为零矩阵: {process} - {src_op} → {tgt_op}")

                correlation_matrices[process][(src_op, tgt_op)] = {
                    'src_fields': src_fields,
                    'tgt_fields': tgt_fields,
                    'src_weights': src_field_weights,
                    'tgt_weights': tgt_field_weights,
                    'value_cooccurrence': value_cooccurrence,
                    'correlation_matrix': correlation
                }

        return correlation_matrices
