"""
预测阶段程序
根据预处理阶段生成的 similarity_matrix 和 priority_fields，预测 Live Spans 之间的关联关系。
"""

import json
from collections import defaultdict
from functools import cache
from typing import Dict, List, Tuple, Optional

import config
import json_utils


class CorrelationMeasure:
    """Ordering measure of the correlation between two spans.

    Compares lexicographically: ``key_measure`` first (field-name similarity),
    then ``value_measure`` (correlation-weighted value co-occurrence) as the
    tie-breaker.
    """

    def __init__(self, tuple_key_measure, tuple_value_measure):
        # Mean Jaro-Winkler similarity of the matched field names.
        self.key_measure = tuple_key_measure
        # Mean correlation-matrix weight of the matched field values.
        self.value_measure = tuple_value_measure

    def _as_tuple(self):
        """Return the (key, value) pair used for lexicographic comparison."""
        return (self.key_measure, self.value_measure)

    def __lt__(self, other):
        return self._as_tuple() < other._as_tuple()

    def __gt__(self, other):
        return self._as_tuple() > other._as_tuple()

    def __eq__(self, other):
        # Value equality (previously fell back to identity, which is
        # surprising for a value-like type).
        if not isinstance(other, CorrelationMeasure):
            return NotImplemented
        return self._as_tuple() == other._as_tuple()

    def __hash__(self):
        # Keep instances hashable now that __eq__ is defined.
        return hash(self._as_tuple())

    def __str__(self):
        return f"({self.key_measure}, {self.value_measure})"


# Sentinel meaning "no correlation"; any measure with a positive component compares greater.
zero_measure = CorrelationMeasure(0.0, 0.0)


class SpanLinkPredictor:
    """Span 关联预测器"""

    def __init__(self, preprocess_result_file: str, blocked_services, weight_topk, allow_zero_score):
        """
        Initialize the predictor and load the preprocessing results.

        Args:
            preprocess_result_file: path to the JSON file produced by the preprocessing stage
            blocked_services: collection of service names to skip during prediction
            weight_topk: number of top-weighted fields to keep per operation
            allow_zero_score: whether a zero-score candidate mapping may be used as a prediction
        """
        self.preprocess_data = {}
        self.blocked_services = blocked_services
        self.weight_topk = weight_topk

        # Field-correlation matrices between operations.
        self.similarity_matrix = {}
        # High-information fields per operation: {process: {operation: [fields]}}
        self.priority_fields = {}
        # Field weights, isomorphic to the Analyzer's field_weights: {process: {operation: {field: weight}}}
        self.field_weights = {}

        self.allow_zero_score = allow_zero_score  # whether score == 0 candidates may become predictions

        # Load the preprocessing-stage output.
        self.load_preprocess_result(preprocess_result_file)

        # Live span data.
        # NOTE(review): initialized as a list, but elsewhere indexed by span_id
        # (e.g. self.live_spans[span['span_id']]); presumably replaced with a
        # dict by the executor — confirm.
        self.live_spans = []  # all live spans; alternatively maintained in executor.all_spans
        self.live_in_spans = []  # server spans (inbound)
        self.live_out_spans = []  # client spans (outbound)

        # Spans grouped by process.
        self.in_spans_by_process = defaultdict(list)  # {process: [in_spans]}
        self.out_spans_by_process = defaultdict(list)  # {process: [out_spans]}

        # Ids of spans already linked (see _predict_for_process).
        self.linked_out_spans = set()

    def load_preprocess_result(self, result_file: str):
        """加载预处理阶段的结果"""
        with open(result_file, 'r', encoding='utf-8') as f:
            self.preprocess_data = json.load(f)

        # 解析 similarity_matrix
        if 'operation_correlation_matrices' in self.preprocess_data:
            op_corr_matrices = self.preprocess_data['operation_correlation_matrices']
            # 格式为 {process: {matrix_key: matrix_data}}
            for process, process_matrices in op_corr_matrices.items():
                for key, data in process_matrices.items():
                    self.similarity_matrix[key] = {
                        'src_fields': data['src_fields'],
                        'tgt_fields': data['tgt_fields'],
                        'matrix': data['correlation_matrix']
                    }

        # 解析 priority_fields（高信息量字段）
        if 'field_weights' in self.preprocess_data:
            # filed 信息量列表 {process: {operation: {field: weight}}}
            self.field_weights = self.preprocess_data['field_weights']

        # 按 operation 分组，按 weight topk 选择 priority_fields
        for process, op_weights in self.field_weights.items():
            self.priority_fields[process] = {}
            for op, weights in op_weights.items():
                # 将字段按权重排序（降序），并选择前 weight_topk 个
                sorted_fields = sorted(weights.items(), key=lambda x: x[1], reverse=True)
                priority_fields = [f for f, w in sorted_fields[:self.weight_topk]]
                self.priority_fields[process][op] = priority_fields

        print(f"已加载预处理结果:")
        print(f"  - 操作关联矩阵: {len(self.similarity_matrix)} 个")
        print(f"  - 高信息量字段: {len(self.priority_fields)} 个操作")

    def filter_by_priority_fields(self, span: Dict) -> List[Tuple[str, str]]:
        """
        Keep only the (field, value) tuples of a span whose field appears in
        the operation's priority-field list.

        Args:
            span: span data

        Returns:
            The filtered (field, value) tuples; when the operation has no
            priority list, or filtering would drop everything, all tuples.
        """
        proc = self.live_spans[span['span_id']].process
        op = span['operation']

        # Flatten the span's field tree into (field, value) pairs.
        all_pairs = json_utils.flatten_json(span.get('fields', {}))

        per_op = self.priority_fields.get(proc)
        if per_op is None or op not in per_op:
            # No priority information for this operation: keep everything.
            return all_pairs

        wanted = set(per_op[op])
        kept = [pair for pair in all_pairs if pair[0] in wanted]
        # Fall back to the unfiltered pairs if filtering removed everything.
        return kept if kept else all_pairs

    @staticmethod
    def _extract_process_from_operation(operation: str) -> str:
        """
        从操作名中提取进程/服务名
        
        Args:
            operation: 操作名
            
        Returns:
            进程/服务名
        """
        if "." in operation:
            parts = operation.split(".")
            if len(parts) >= 2:
                return parts[0]  # 第一部分通常是服务名
        return operation

    @staticmethod
    def _partition_spans_by_endpoint(spans: List[Dict], endpoint_lambda) -> Dict[str, List[Dict]]:
        """
        按 endpoint 分区 spans
        
        Args:
            spans: span 列表
            endpoint_lambda: 提取 endpoint 的函数
            
        Returns:
            按 endpoint 分组的 spans
        """
        partitions = {}
        for span in spans:
            ep = endpoint_lambda(span)
            if ep is None:
                continue
            if ep not in partitions:
                partitions[ep] = []
            partitions[ep].append(span)

        return partitions

    @staticmethod
    @cache
    def calculate_key_similarity(s1: str, s2: str) -> float:
        """
        计算两个字符串之间的 Jaro-Winkler 相似度。可缓存。

        参数:
            s1 (str): 第一个字符串
            s2 (str): 第二个字符串

        返回:
            float: 相似度分数，范围 [0, 1]，1 表示完全相同
        """
        if not s1 and not s2:
            return 1.0
        if not s1 or not s2:
            return 0.0

        len1, len2 = len(s1), len(s2)
        if len1 == 0 or len2 == 0:
            return 0.0

        # 转小写
        s1, s2 = s1.lower(), s2.lower()
        p: float = 0.1  # 缩放因子，通常为 0.1
        max_prefix: int = 4  # 最大考虑前缀长度（通常为 4）

        # 步骤 1: 定义匹配窗口
        window = max(len1, len2) // 2 - 1
        if window < 0:
            window = 0

        # 步骤 2: 找出匹配字符和匹配位置
        matched1 = [False] * len1  # s1 中哪些字符已匹配
        matched2 = [False] * len2  # s2 中哪些字符已匹配
        matches = 0  # 匹配字符总数 m

        for i in range(len1):
            start = max(0, i - window)
            end = min(i + window + 1, len2)
            for j in range(start, end):
                if not matched2[j] and s1[i] == s2[j]:
                    matched1[i] = True
                    matched2[j] = True
                    matches += 1
                    break

        if matches == 0:
            return 0.0

        # 步骤 3: 计算错位（transpositions）
        # 在匹配字符中，有多少对是顺序不同的？
        t = 0  # 错位数的一半
        k = 0  # 遍历 s2 的指针
        for i in range(len1):
            if matched1[i]:
                while not matched2[k]:
                    k += 1
                if s1[i] != s2[k]:
                    t += 1
                k += 1
        t //= 2  # t 是“错位对”的数量的一半

        # 步骤 4: 计算 Jaro 相似度
        jaro = (matches / len1 +
                matches / len2 +
                (matches - t) / matches) / 3.0

        # 步骤 5: 计算公共前缀长度（最多 max_prefix）
        prefix_len = 0
        for i in range(min(len1, len2, max_prefix)):
            if s1[i] == s2[i]:
                prefix_len += 1
            else:
                break

        # 步骤 6: 加上 Winkler 奖励
        winkler = jaro + (prefix_len * p * (1 - jaro))

        return winkler

    def calculate_tuple_similarity(self, out_span: Dict, in_span: Dict) -> CorrelationMeasure:
        """
        Measure the correlation between two spans, combining
        1. value co-occurrence and 2. key-name similarity.

        Args:
            out_span: client span
            in_span: server span

        Returns:
            A CorrelationMeasure; ``zero_measure`` when no matrix is defined
            for the operation pair or no field values co-occur.
        """
        out_op = out_span['operation']
        in_op = in_span['operation']

        # Look up the correlation matrix for this operation pair.
        matrix_key = f"{in_op} -> {out_op}"
        matrix_data = self.similarity_matrix.get(matrix_key)
        if matrix_data is None:
            # No matrix defined for this pair: score 0 outright.
            return zero_measure

        src_fields = matrix_data['src_fields']
        tgt_fields = matrix_data['tgt_fields']
        correlation_matrix = matrix_data['matrix']

        # Extract the (priority-filtered) field values of both spans.
        out_field_values = dict(self.filter_by_priority_fields(out_span))
        in_field_values = dict(self.filter_by_priority_fields(in_span))

        # BUG FIX: the old defaults .get(out_field, 1) / .get(in_field, 0)
        # could spuriously match a missing field against a real value of 1 or
        # 0. A unique sentinel can never equal a real JSON value.
        _missing = object()

        value_measure = 0.0  # co-occurrence weighted by the correlation matrix
        key_measure = 0.0    # key-string similarity over co-occurring fields
        cooccurrence_count = 0
        for i, in_field in enumerate(src_fields):
            in_value = in_field_values.get(in_field, _missing)
            if in_value is _missing:
                # Field absent on the in side: it cannot co-occur with anything.
                continue
            for j, out_field in enumerate(tgt_fields):
                # Count a co-occurrence only when both fields exist and agree.
                if out_field_values.get(out_field, _missing) == in_value:
                    value_measure += correlation_matrix[i][j]
                    # Cached Jaro-Winkler on the field names.
                    key_measure += self.calculate_key_similarity(out_field, in_field)
                    cooccurrence_count += 1

        if cooccurrence_count == 0:
            return zero_measure
        # Average both measures over the number of co-occurrences.
        return CorrelationMeasure(key_measure / cooccurrence_count, value_measure / cooccurrence_count)

    def find_best_match(self, out_span: Dict, candidate_span_ids: List[str]) -> Optional[str]:
        """
        Find the best-matching in_span among the candidates.

        Args:
            out_span: client span
            candidate_span_ids: candidate server span IDs

        Returns:
            The best in_span id, or None when no acceptable match exists
            (the best score must be positive unless allow_zero_score is set).
        """
        best_span_id = None
        best_score = zero_measure

        # Index in_spans once instead of a linear scan per candidate.
        in_spans_by_id = {s['span_id']: s for s in self.live_in_spans}

        for span_id in candidate_span_ids:
            in_span = in_spans_by_id.get(span_id)
            if in_span is None:
                continue

            score = self.calculate_tuple_similarity(out_span, in_span)
            if score > best_score:
                best_score = score
                best_span_id = span_id

        # BUG FIX: this was `self.allow_zero_score ^ (best_score.value_measure > 0)`,
        # which REJECTED positive-score matches whenever allow_zero_score was
        # True. Per the flag's documented meaning ("allow score == 0 candidates
        # as predictions"), accept a positive-score match always, and a
        # zero-score one only when the flag is set.
        if self.allow_zero_score or best_score.value_measure > 0:
            return best_span_id
        return None

    def predict_links(self) -> Dict[str, str]:
        """
        预测 out_spans 和 in_spans 之间的关联关系 - 按 process 逐个处理

        Returns:
            Dict: {out_span_id -> in_span_id}
            一个 out_span 只能有一个 in_span，而一个 in_span 可能有多个 out_span
        """
        links = {}

        print("\n开始预测 Span 关联关系（按进程处理）...")

        # 遍历系统中的全体 process
        for process in self.out_spans_by_process.keys():
            print(f"\nPROCESSING THIS PROCESS: {process}")

            # 跳过黑名单服务
            if process in self.blocked_services:
                print(f"SKIPPING THIS PROCESS: {process} in block list.")
                continue

            # 跳过边缘服务 (没有 in_spans 或 out_spans)
            if process not in self.in_spans_by_process or process not in self.out_spans_by_process:
                print(f"SKIPPING THIS PROCESS: {process} is an edge service.")
                continue

            in_spans = self.in_spans_by_process[process]
            out_spans = self.out_spans_by_process[process]

            if len(out_spans) == 0:
                print(f"SKIPPING THIS PROCESS: {process} has no out_spans.")
                continue

            # 按 endpoint 分区 spans
            in_span_partitions = self._partition_spans_by_endpoint(
                in_spans, lambda x: x['operation']
            )
            out_span_partitions = self._partition_spans_by_endpoint(
                out_spans, lambda x: x['operation']
            )

            in_eps = list(in_span_partitions.keys())
            out_eps = list(out_span_partitions.keys())

            print(f"In span has {len(in_eps)} partitions: {in_eps}")
            print(f"Out span has {len(out_eps)} partitions: {out_eps}")

            # 跳过自调用服务 (检查是否有相同的操作名在 in 和 out 中)
            has_self_calls = False
            for in_ep in in_eps:
                if in_ep in out_eps:
                    has_self_calls = True
                    break
            if has_self_calls:
                print(f"SKIPPING THIS PROCESS: {process} has self-calls.")
                continue

            # 执行预测
            process_links = self._predict_for_process(process, in_span_partitions, out_span_partitions)

            # 合并结果
            links.update(process_links)

            print(f"\n{process} 服务预测了 {len(process_links)} / {len(self.out_spans_by_process[process])} 个关联关系")

        print(f"\notel-demo 应用预测了 {len(links)} / {len(self.live_out_spans)} 个关联关系")

        return links

    def _predict_for_process(self, process: str, in_span_partitions: Dict, out_span_partitions: Dict,
                             epoch=1) -> Dict[str, str]:
        """
        Predict links between the out_spans and in_spans of one process.

        Runs repeated rounds: each round tries to link every unlinked
        out_span, field weights are updated online from the failures, and the
        round repeats until no new links are produced.
        # todo organize the computation order with a proper DAG; support
        # out_op -> out_op edges describing data dependencies between
        # downstream operations

        Returns:
            Dict: {out_span_id -> in_span_id}
            An out_span maps to exactly one in_span, while an in_span may be
            the target of several out_spans.
        """
        links = {}
        unlinked_out_spans = []
        last_linked_out_spans_count = len(self.linked_out_spans)

        print(f"\n第 {epoch} 轮预测 {process} 服务的 Span 关联关系")

        for out_op, out_spans in out_span_partitions.items():
            for in_op, in_spans in in_span_partitions.items():
                matrix_key = f"{in_op} -> {out_op}"
                # Only consider pairs with a predefined correlation matrix.
                if matrix_key not in self.similarity_matrix:
                    continue

                # Index once; the original re-scanned in_spans per candidate.
                in_spans_by_id = {s['span_id']: s for s in in_spans}

                for out_span in out_spans:
                    out_span_id = out_span['span_id']

                    # Start from all in_spans, ordered by start time.
                    candidate_span_ids = sorted(
                        in_spans_by_id,
                        key=lambda sid: self.live_spans[sid].start_mus,
                    )

                    # Temporal constraint: in_span happens_before out_span.
                    out_live = self.live_spans[out_span_id]
                    filtered_candidates = [
                        sid for sid in candidate_span_ids
                        if self.live_spans[sid].happens_before(out_live)
                    ]

                    # Fall back to the unfiltered candidates if none survive.
                    if filtered_candidates:
                        candidate_span_ids = filtered_candidates

                    if not candidate_span_ids:
                        unlinked_out_spans.append(out_span)
                        continue

                    # Pick the best match among the candidates.
                    best_span_id = self.find_best_match(out_span, candidate_span_ids)
                    if best_span_id:
                        links[out_span_id] = best_span_id
                        # BUG FIX: record the *out* span id (this set tracks
                        # linked out_spans and drives the recursion exit);
                        # adding the in_span id made two out_spans linked to
                        # one in_span look like no progress.
                        self.linked_out_spans.add(out_span_id)
                        if config.VERBOSE:
                            in_span = in_spans_by_id.get(best_span_id)
                            if in_span:
                                print(
                                    f"  {out_span_id} ({out_op}) -> {best_span_id} ({in_span['operation']})")
                    else:
                        unlinked_out_spans.append(out_span)
                if config.VERBOSE:
                    print(
                        f"\n{out_op} 接口: {len(links)} / {len(self.out_spans_by_process[process])} 个下游消息已关联")

        # Recursion exit: no new links were produced this round.
        if len(self.linked_out_spans) == last_linked_out_spans_count:
            if config.VERBOSE:
                print(
                    f"\n{process} 服务: {len(self.linked_out_spans)} / {len(self.out_spans_by_process[process])} 个下游消息已关联")
            return links

        # BUG FIX: an out_span appended to unlinked_out_spans under one in_op
        # partition may still have been linked under another; drop anything
        # already linked so a later round cannot overwrite a settled link.
        remaining = [s for s in unlinked_out_spans if s['span_id'] not in links]

        # Update self.field_weights online using the spans that failed to link.
        self.update_field_weights(remaining)

        # Next round over the remaining out_spans.
        unlinked_out_span_partitions = self._partition_spans_by_endpoint(
            remaining, lambda x: x['operation']
        )
        remaining_links = self._predict_for_process(process, in_span_partitions, unlinked_out_span_partitions,
                                                    epoch + 1)
        links.update(remaining_links)
        return links

    def evaluate_predictions(self, links: Dict[str, str]) -> Dict:
        """
        评估预测结果
        
        Args:
            links: 预测的关联关系
            
        Returns:
            评估指标
        """
        total = len(self.live_out_spans)
        correct = 0
        incorrect = 0
        missing = 0

        for out_span in self.live_out_spans:
            out_span_id = out_span['span_id']

            # 找到对应的 expected in_span
            expected_in_span_id = None
            for in_span in self.live_in_spans:
                if in_span.get('expected_client_span_id') == out_span_id:
                    expected_in_span_id = in_span['span_id']
                    break

            if out_span_id in links:
                predicted_in_span_id = links[out_span_id]
                if predicted_in_span_id == expected_in_span_id:
                    correct += 1
                else:
                    incorrect += 1
                    if config.VERBOSE:
                        print(f"错误匹配: {out_span_id} -> {predicted_in_span_id} (期望: {expected_in_span_id})")
            else:
                missing += 1

        accuracy = correct / total if total > 0 else 0.0
        precision = correct / (correct + incorrect) if (correct + incorrect) > 0 else 0.0
        recall = correct / (correct + missing) if (correct + missing) > 0 else 0.0
        f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0.0

        metrics = {
            "total": total,
            "correct": correct,
            "incorrect": incorrect,
            "missing": missing,
            "accuracy": accuracy,
            "precision": precision,
            "recall": recall,
            "f1_score": f1
        }

        return metrics

    def export_results(self, links: Dict[str, str], metrics: Dict, output_file: str):
        """
        导出预测结果
        
        Args:
            links: 预测的关联关系
            metrics: 评估指标
            output_file: 输出文件路径
        """
        results = {
            "summary": {
                "total_out_spans": len(self.live_out_spans),
                "total_in_spans": len(self.live_in_spans),
                "total_links": len(links),
            },
            "metrics": metrics,
            "links": links,
            "details": {
                "out_spans": self.live_out_spans,
                "in_spans": self.live_in_spans
            }
        }

        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump(results, f, ensure_ascii=False, indent=2)

        print(f"\n预测结果已导出到: {output_file}")

    def update_field_weights(self, out_spans):
        """Placeholder: online update of self.field_weights from out_spans that failed to link (not yet implemented; called between prediction rounds by _predict_for_process)."""
        pass
