"""
Executor 脚本 - 串联完整的流水线，将 analyze_field_cooccurrence.py 和 predict_span_links.py 打通。
"""

import argparse
import json
import os
import sys
from collections import defaultdict
from typing import Dict, List

import config
import json_utils
from analyze_field_cooccurrence import FieldCooccurrenceAnalyzer
from common import Field
from predict_span_links import SpanLinkPredictor
from trace_loader import TraceLoader


class SpanLinkExecutor:
    """End-to-end span-link prediction executor.

    Chains the full pipeline: load trace data, analyze field co-occurrence
    (FieldCooccurrenceAnalyzer), predict in/out span links (SpanLinkPredictor),
    and evaluate the predictions.
    """

    # Field values too generic to carry any linking signal; matched
    # case-insensitively in extract_field_values().
    _BLOCKED_FIELD_VALUES = frozenset({
        # booleans
        'true', 'false',
        # common operation statuses
        'success', 'fail', 'error',
        # common enum values
        '0', '1', '2', '3', '4',
    })

    def __init__(self, data_file: str, blocked_services: List[str], weight_method: str = 'entropy',
                 weight_topk: int = 5, predict_method: str = 'CbT'):
        """
        Initialize the executor.

        Args:
            data_file: Path to the JSON trace data file.
            blocked_services: Service names excluded from span collection and
                evaluation.
            weight_method: Field-weighting scheme, one of: 'idf', 'entropy', 'bm25'.
            weight_topk: Top-K parameter: number of highest-weighted fields to keep.
            predict_method: Prediction strategy, one of: 'CbT', 'FCFS'.
        """
        self.data_file = data_file
        self.blocked_services = blocked_services
        self.weight_method = weight_method
        self.weight_topk = weight_topk
        self.predict_method = predict_method

        # Raw data stores
        self.all_traces = []  # every successfully parsed trace
        self.all_spans = {}  # keyed by span.GetId(); in/out spans of non-blocked services
        self.all_processes = {}  # process mapping

        # Partitioned data
        self.in_spans_by_process = {}  # in_spans grouped by process
        self.out_spans_by_process = {}  # out_spans grouped by process
        self.in_span_partitions_by_process = {}  # grouped by process, then by operation
        self.out_span_partitions_by_process = {}  # grouped by process, then by operation

        # Predicted and ground-truth links, both shaped as
        # assignments[out_ep][out_span.GetId()] = in_span.GetId()
        self.pred_assignments = defaultdict(dict)
        self.true_assignments = defaultdict(dict)

        # Preprocessing artifacts
        self.correlation_matrices = None
        self.analyzer = None

    def _preprocess_path(self) -> str:
        """Return the path of the preprocessing artifact for the data file.

        Uses splitext instead of the previous `data_file[:-5]` slice, which
        silently mangled the name whenever the file did not end in ".json".
        """
        return os.path.splitext(self.data_file)[0] + "_preprocess.json"

    def load_data(self):
        """Step 1: load the JSON data.

        Loading/parsing of the JSON data is essentially solid; avoid
        prioritizing debugging of this part in the future.
        """
        print("=" * 80)
        print("步骤1: 加载数据")
        print("=" * 80)

        print(f"正在加载数据文件: {self.data_file}")

        # Parse the JSON file directly.
        with open(self.data_file, 'r', encoding='utf-8') as f:
            json_data = json.load(f)

        # Create a TraceLoader; fix_mode=7 corresponds to the ingress
        # operation of the otel-demo application.
        loader = TraceLoader(fix_mode=7)

        # Parse every trace.
        for trace_data in json_data["data"]:
            trace_id = trace_data["traceID"]

            try:
                # Parse with parse_json_trace.
                parsed_trace_id, spans, processes, _, _ = loader.parse_json_trace(trace_data)

                if parsed_trace_id is None:
                    continue

                # Post-process the parsed trace.
                success = loader.process_trace_data(parsed_trace_id, spans, processes)
                if success:
                    self.all_traces.append({
                        'trace_id': parsed_trace_id,
                        'spans': spans,
                        'processes': processes
                    })

            except Exception as e:
                if config.VERBOSE:
                    print(f"跳过 trace {trace_id}: {e}")
                continue

        # Merge all data.
        self.all_processes = loader.all_processes

        self.in_spans_by_process = loader.in_spans_by_process
        self.out_spans_by_process = loader.out_spans_by_process

        # all_spans must be the merge of in_spans and out_spans rather than a
        # plain copy of loader.all_spans: the former went through the IsInSpan
        # filter, while loader.all_spans would drag unrelated op_names into
        # extract_field_values (e.g. both "GET" and "HTTP GET").
        for process, spans in loader.in_spans_by_process.items():
            if process in self.blocked_services:
                continue
            for span in spans:
                self.all_spans[span.GetId()] = span
        for process, spans in loader.out_spans_by_process.items():
            if process in self.blocked_services:
                continue
            for span in spans:
                self.all_spans[span.GetId()] = span

        print(f"成功加载 {len(self.all_traces)} 个 traces")
        print(f"总 Span 数: {len(self.all_spans)}")
        print(f"总进程数: {len(self.in_spans_by_process) + len(self.out_spans_by_process)}")

        # Build span partitions keyed by operation.
        self._build_span_partitions()

        # Build the ground-truth links.
        self._build_true_assignments()

    def _build_span_partitions(self):
        """Group in/out spans by process, then by operation name."""
        print("\n构建 Span 分区...")

        self._partition_by_operation(self.in_spans_by_process, self.in_span_partitions_by_process)
        self._partition_by_operation(self.out_spans_by_process, self.out_span_partitions_by_process)

    @staticmethod
    def _partition_by_operation(spans_by_process, partitions):
        """Fill partitions[process][operation] lists from spans_by_process."""
        for process, spans in spans_by_process.items():
            ops = partitions.setdefault(process, {})
            for span in spans:
                ops.setdefault(span.op_name, []).append(span)

    def _build_true_assignments(self):
        """
        Build the ground-truth links (based on the trace structure).
        For instance the frontend service needs its parent_span_id stitched on.
        """
        print("\n构建真实关联关系...")

        for trace_data in self.all_traces:
            spans = trace_data['spans']
            # Walk all server spans (in_span).
            for span_id, span in spans.items():
                if span.IsInSpan():
                    # Find the matching client span via children_spans.
                    if not span.children_spans:
                        continue
                    for child_span_id in span.children_spans:
                        if child_span_id in spans:
                            child_span = spans[child_span_id]
                            if child_span.IsOutSpan():
                                # Build the endpoint.
                                out_ep = child_span.GetProcessOperation(self.all_processes)
                                if out_ep:
                                    self.true_assignments[out_ep][child_span.GetId()] = span.GetId()

        total_assignments = sum(len(assignments) for assignments in self.true_assignments.values())
        print(f"构建了 {total_assignments} 个真实关联关系")

    def analyze_field_cooccurrence(self):
        """Step 2: compute field co-occurrence statistics from span.content."""
        print("\n" + "=" * 80)
        print("步骤2: 分析字段共现")
        print("=" * 80)

        # Create the analyzer with the configured weighting scheme.
        analyzer = FieldCooccurrenceAnalyzer(weight_method=self.weight_method)

        # Load dependency definitions.
        analyzer.load_operation_dependencies()
        analyzer.load_data_dependencies()
        # analyzer.load_field_dependencies()  # deliberately disabled

        # Extract field values from the actual data.
        self.extract_field_values(analyzer)

        # Compute field weights.
        print("计算字段权重...")
        print(f"\n使用 {self.weight_method.upper()} 作为权重计算方式...")
        print(f"权重 Top-K 参数: {self.weight_topk}")

        if self.weight_method == 'entropy':
            print(f"  - 计算信息熵 (Entropy) 权重...")
            analyzer.calculate_entropy_weights()
        elif self.weight_method == 'bm25':
            print(f"  - 计算 BM25 权重...")
            analyzer.calculate_bm25_weights()
        else:
            print(f"  - 计算归一化 IDF 权重...")
            analyzer.calculate_idf_weights(normalize=True)

        # Build the operation correlation matrices.
        print("构建操作关联矩阵...")
        self.correlation_matrices = analyzer.build_op_correlation_matrices()
        self.analyzer = analyzer

        total_matrices = sum(len(matrices) for matrices in self.correlation_matrices.values())
        print(f"完成字段共现分析，生成了 {total_matrices} 个操作关联矩阵")

        # Persist the preprocessing result for SpanLinkPredictor.
        self._save_preprocess_result(self._preprocess_path())

    def extract_field_values(self, analyzer):
        """Extract field values from spans to build the Field-level corpus.

        Values are coerced with str() before the blocked-value check; the
        previous implementation called .lower() on the raw value and would
        raise AttributeError on non-string content values.
        """
        print("从 Spans 中提取字段值...")

        blocked_values = self._BLOCKED_FIELD_VALUES
        field_count = 0
        for span_id, span in self.all_spans.items():
            if hasattr(span, 'content') and span.content:
                process = span.process
                operation = span.op_name

                # Flatten the content dict into (field, value) pairs.
                field_values = json_utils.flatten_json(span.content)

                for field, value in field_values:
                    if not value:  # filter falsy values
                        continue
                    text = str(value)
                    if not text.strip():  # filter whitespace-only values
                        continue
                    if text.lower() in blocked_values:
                        continue
                    analyzer.field_value_index[process][operation][field].append(text)
                    analyzer.value_field_index[text].add(Field(process, operation, field))
                    field_count += 1

        print(f"提取了 {field_count} 个字段值")
        print(f"唯一值数量: {len(analyzer.value_field_index)}")

    def predict_span_links(self):
        """Step 3: predict in_span/out_span links from field co-occurrence."""
        print("\n" + "=" * 80)
        print("步骤3: 预测 Span 关联关系")
        print("=" * 80)

        # Create the SpanLinkPredictor; only the FCFS strategy accepts
        # zero-score candidates.
        allow_zero_score = self.predict_method == 'FCFS'
        predictor = SpanLinkPredictor(self._preprocess_path(), self.blocked_services, self.weight_topk,
                                      allow_zero_score)

        # Convert the span data into the format SpanLinkPredictor expects.
        self._load_spans_to_predictor(predictor)

        # Run the prediction.
        links = predictor.predict_links()

        # Convert the predictions back into the original format.
        self._restore_predictions(links)

        total_predictions = sum(len(assignments) for assignments in self.pred_assignments.values())
        print(f"\n完成预测，总共生成了 {total_predictions} 个预测关联")

    def _save_preprocess_result(self, temp_file: str):
        """Persist preprocessing results (weights + matrices) to `temp_file`."""

        # Build a JSON schema compatible with the analyzer.
        preprocess_data = {
            'operation_correlation_matrices': {},
            'field_weights': self.analyzer.field_weights,
            'field_weight_method': self.weight_method
        }

        # Convert the correlation-matrix format; correlation_matrices is
        # shaped {process: {(src_op, tgt_op): matrix_data}}.
        for process, process_matrices in self.correlation_matrices.items():
            preprocess_data['operation_correlation_matrices'][process] = {}
            for (src_op, tgt_op), matrix_data in process_matrices.items():
                matrix_key = f"{src_op} -> {tgt_op}"
                preprocess_data['operation_correlation_matrices'][process][matrix_key] = {
                    'src_fields': matrix_data.get('src_fields', []),
                    'tgt_fields': matrix_data.get('tgt_fields', []),
                    'correlation_matrix': matrix_data.get('correlation_matrix', [])
                }

        with open(temp_file, 'w', encoding='utf-8') as f:
            json.dump(preprocess_data, f, ensure_ascii=False, indent=2)

    def _load_spans_to_predictor(self, predictor):
        """
        Convert span data into the format SpanLinkPredictor expects.
        TODO: the span_data dict layer is unnecessary; the Span class suffices.
        """
        predictor.live_spans = self.all_spans
        predictor.live_in_spans = []
        predictor.live_out_spans = []

        # Convert out_spans (client spans).
        for process, out_spans in self.out_spans_by_process.items():
            bucket = predictor.out_spans_by_process.setdefault(process, [])
            for span in out_spans:
                span_data = self._span_to_dict(span)
                predictor.live_out_spans.append(span_data)
                bucket.append(span_data)

        # Convert in_spans (server spans).
        for process, in_spans in self.in_spans_by_process.items():
            bucket = predictor.in_spans_by_process.setdefault(process, [])
            for span in in_spans:
                span_data = self._span_to_dict(span)
                predictor.live_in_spans.append(span_data)
                bucket.append(span_data)

    @staticmethod
    def _span_to_dict(span):
        """Build the dict representation SpanLinkPredictor consumes."""
        return {
            'span_id': span.GetId(),
            'trace_id': getattr(span, 'trace_id', 'unknown'),
            'operation': span.op_name,
            'fields': getattr(span, 'content', {})
        }

    def _restore_predictions(self, links: Dict[str, str]):
        """Convert predictor output back into pred_assignments.

        `links` maps out_span_id -> in_span_id. self.all_spans is already
        keyed by span.GetId() (see load_data), so no remapping is needed.
        Links whose spans are unknown (e.g. from blocked services) are skipped.
        """
        for out_span_id, in_span_id in links.items():
            out_span = self.all_spans.get(out_span_id)
            in_span = self.all_spans.get(in_span_id)
            if out_span is None or in_span is None:
                continue

            # Resolve the out-endpoint and record the predicted link.
            out_ep = out_span.GetChildEndpoint(self.all_processes, self.all_spans)
            if out_ep:
                self.pred_assignments[out_ep][out_span.GetId()] = in_span.GetId()

    def evaluate_predictions(self):
        """Step 4: compare predictions against ground truth and report accuracy."""
        print("\n" + "=" * 80)
        print("步骤4: 评估预测结果")
        print("=" * 80)

        # Only look at predictions for the checkout process.
        checkout_predictions = {}
        checkout_ground_truth = {}

        # Collect checkout-related predictions and ground truth.
        for out_ep, assignments in self.pred_assignments.items():
            if "checkout" in out_ep.lower():
                checkout_predictions[out_ep] = assignments

        for out_ep, assignments in self.true_assignments.items():
            if "checkout" in out_ep.lower():
                checkout_ground_truth[out_ep] = assignments

        # Compute accuracy.
        ground_truth = sum(len(assignments) for assignments in checkout_ground_truth.values())

        total_predictions = sum(len(assignments) for assignments in checkout_predictions.values())

        correct_predictions = 0
        for out_ep in checkout_predictions:  # compare endpoint by endpoint
            pred_assignments = checkout_predictions[out_ep]
            true_assignments = checkout_ground_truth.get(out_ep, {})

            # Both mappings are keyed by out_span_id with in_span_id values
            # (previous loop variables had the two roles swapped, which made
            # this comparison read backwards).
            for out_span_id, in_span_id in pred_assignments.items():
                if out_span_id in true_assignments and in_span_id == true_assignments[out_span_id]:
                    correct_predictions += 1

        # Metric: correct predictions over the ground-truth count.
        accuracy = correct_predictions / ground_truth if ground_truth > 0 else 0.0

        print(f"总预测数: {total_predictions}")
        print(f"正确预测数: {correct_predictions}")
        print(f"真值数: {ground_truth}")
        print(f"准确率: {accuracy:.2%}")

    def evaluate_predictions_fast(self):
        """
        Faster correctness check: directly compare the trace_id of the in/out
        spans inside pred_assignments. This does not depend on (or require
        building a correct) true_assignments, is service-agnostic, and needs
        no stitching.
        """
        print("\n" + "=" * 80)
        print("步骤4: 评估预测结果")
        print("=" * 80)

        def get_svc_from_ep(ep):
            """Extract the service name from an endpoint."""
            return ep.split(",")[0]

        total_ground_truth = 0
        for out_ep, spans in self.out_spans_by_process.items():
            if get_svc_from_ep(out_ep) in self.blocked_services:
                continue

            # Ground-truth count for this process.
            total_ground_truth += len(spans)

        correct_predictions = 0
        total_predictions = 0
        for out_ep, assignments in self.pred_assignments.items():
            if get_svc_from_ep(out_ep) in self.blocked_services:
                continue

            # Predictions for this process. Span ids carry the trace id at
            # index 0 (see the docstring above), so a link is correct iff
            # both spans belong to the same trace. (Previous loop variables
            # had the key/value roles swapped; the mapping is out -> in.)
            for out_span_id, in_span_id in assignments.items():
                if out_span_id[0] == in_span_id[0]:
                    correct_predictions += 1
                total_predictions += 1

        accuracy = correct_predictions / total_predictions if total_predictions > 0 else 0.0
        # Alternative denominator (recall over the ground-truth count):
        # accuracy = correct_predictions / total_ground_truth if total_ground_truth > 0 else 0.0

        print(f"总预测数: {total_predictions}")
        print(f"正确预测数: {correct_predictions}")
        # print(f"真值数: {total_ground_truth}")
        print(f"准确率: {accuracy:.2%}")

    def run_pipeline(self):
        """Run the complete pipeline: load, analyze, predict, evaluate."""
        print("开始执行完整的 Span 关联预测流水线")
        print("=" * 80)

        try:
            # Step 1: load data.
            self.load_data()

            # Step 2: analyze field co-occurrence.
            self.analyze_field_cooccurrence()

            # Step 3: predict links.
            self.predict_span_links()

            # Step 4: evaluate results (the slower, checkout-specific
            # evaluate_predictions() is available as an alternative).
            self.evaluate_predictions_fast()

            print("\n" + "=" * 80)
            print("流水线执行完成!")
            print("=" * 80)

        except Exception as e:
            print(f"执行过程中出现错误: {e}")
            import traceback
            traceback.print_exc()
            return None


def main():
    """CLI entry point: parse arguments, validate the dataset, run the pipeline."""

    # Full roster of otel-demo services (edge services are not counted).
    service_roster = ['frontend-proxy', 'frontend', 'checkout', 'cart', 'shipping', 'recommendation']
    # Supported field-weighting schemes.
    weight_method_choices = ['idf', 'entropy', 'bm25']
    # Supported prediction strategies.
    predict_method_choices = ['CbT', 'FCFS']

    # Command-line arguments.
    parser = argparse.ArgumentParser(description='执行完整的 Span 关联预测流水线')
    parser.add_argument('--data_file', type=str,
                        default='data/cbt/checkout_100_user50.json',
                        help='数据文件路径')
    parser.add_argument('--test_case', type=str,
                        choices=service_roster,
                        default='checkout',
                        help='测试服务')
    parser.add_argument('--weight_method', type=str,
                        choices=weight_method_choices,
                        default='entropy',
                        help='权重计算方法')
    parser.add_argument('--weight_topk', type=int,
                        default=5,
                        help='权重 Top-K 参数，用于选择权重最高的 K 个字段')
    parser.add_argument('--predict_method', type=str,
                        choices=predict_method_choices,
                        default='CbT',
                        help='预测方法')

    args = parser.parse_args()

    # Build the experiment context.
    # 1. The dataset file must exist.
    if not os.path.exists(args.data_file):
        print(f"错误: 数据文件 {args.data_file} 不存在")
        sys.exit(1)
    # 2. Every service except the chosen test case is blocked.
    blocked_services = [svc for svc in service_roster if svc != args.test_case]

    # Build the executor and run the full pipeline.
    pipeline = SpanLinkExecutor(args.data_file, blocked_services, args.weight_method, args.weight_topk,
                                args.predict_method)
    pipeline.run_pipeline()


# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == '__main__':
    main()
