# -*- coding: utf-8 -*-
from __future__ import annotations

import argparse
import json
import os
import random
import time
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional

# Cap numeric-library thread counts early (child processes inherit these env
# vars); setdefault() keeps any limits the user already exported.
os.environ.setdefault("NUMBA_NUM_THREADS", "1")
os.environ.setdefault("OMP_NUM_THREADS", "1")
os.environ.setdefault("MKL_NUM_THREADS", "1")
os.environ.setdefault("OPENBLAS_NUM_THREADS", "1")

import numpy as np
from numba import njit

# -----------------------------
# Python-side data structures
# -----------------------------

@dataclass
class SoftStep:
    """One control point of a piecewise-linear soft-pity rate curve."""
    threshold: int       # draw-count threshold (1-based)
    rate_bp: int         # hit probability in basis points (0..10000 = 0..100%)

@dataclass
class Group:
    """A reward group: base rate, hard pity, priority, and soft-pity curve."""
    group_id: int
    name: str
    base_rate_bp: int    # base hit probability in basis points (0..10000)
    hard_pity: int       # guaranteed hit on this draw count; 0 disables hard pity
    priority: int        # priority: higher values are settled first
    soft_steps: List[SoftStep] = field(default_factory=list)

# -----------------------------
# Numba kernels (nopython)
# -----------------------------

@njit(cache=True, fastmath=True)
def _prep_slopes_intercepts(thresholds_row, rates_row, L):
    # Piecewise-linear prep: compute slope/intercept for each segment between
    # consecutive (threshold, rate) points; degenerates to zero vectors when L <= 1.
    if L <= 1:
        return np.zeros(1, np.float64), np.zeros(1, np.float64)

    slopes = np.empty(L - 1, np.float64)
    intercepts = np.empty(L - 1, np.float64)

    for j in range(L - 1):
        dx = thresholds_row[j + 1] - thresholds_row[j]
        inv_dx = 1.0 / dx  # precompute 1/dx so the division below becomes a multiply
        dy = rates_row[j + 1] - rates_row[j]
        s = dy * inv_dx
        slopes[j] = s
        intercepts[j] = rates_row[j] - s * thresholds_row[j]

    return slopes, intercepts

@njit(cache=True, fastmath=True)
def compile_soft_steps_arrays_numba(thresholds_2d, rates_2d, lens):
    # Precompute, per group, the segment slopes/intercepts and the earliest
    # "soft pity = 100%" threshold (first step whose rate reaches 10000 bp).
    n_groups = thresholds_2d.shape[0]
    max_steps = thresholds_2d.shape[1]
    max_segments = max_steps - 1 if max_steps > 0 else 0

    slopes_2d = np.zeros((n_groups, max_segments), np.float64)
    intercepts_2d = np.zeros((n_groups, max_segments), np.float64)
    soft100_thresholds = np.zeros(n_groups, np.int32)

    for i in range(n_groups):
        L = lens[i]
        if L == 0:
            soft100_thresholds[i] = 0
            continue

        # Earliest threshold whose rate reaches/exceeds 10000 bp (0 if none).
        t100 = 0
        for j in range(L):
            if rates_2d[i, j] >= 10000:
                t100 = thresholds_2d[i, j]
                break
        soft100_thresholds[i] = t100

        if L >= 2:
            slopes, intercepts = _prep_slopes_intercepts(
                thresholds_2d[i, :L],
                rates_2d[i, :L],
                L,
            )
            slopes_2d[i, : L - 1] = slopes
            intercepts_2d[i, : L - 1] = intercepts

    return slopes_2d, intercepts_2d, soft100_thresholds

@njit(cache=True, fastmath=True)
def _rate_from_seg(
    draw_n,
    base_rate,
    thresholds_row,
    rates_row,
    slopes_row,
    intercepts_row,
    L,
    seg,
    use_ceil,
):
    # Interpolate the rate (bp) on segment `seg` for draw number `draw_n`,
    # clamping to the first/last step outside the threshold range; the result
    # never drops below base_rate.
    if L == 0:
        return base_rate

    if draw_n <= thresholds_row[0]:
        r = rates_row[0]
        return base_rate if base_rate > r else r

    last_t = thresholds_row[L - 1]
    if draw_n >= last_t:
        r = rates_row[L - 1]
        return base_rate if base_rate > r else r

    y = slopes_row[seg] * draw_n + intercepts_row[seg]
    # Caller chooses the rounding mode: ceil vs round-half-even (np.round).
    y_int = int(np.ceil(y)) if use_ceil else int(np.round(y))
    return base_rate if base_rate > y_int else y_int

@njit(cache=True, fastmath=True)
def _update_seg_idx_after_increment(dn, thresholds_row, L, seg, right_closed):
    # O(1) segment advance: move `seg` forward only when `dn` crosses the next
    # threshold; `right_closed` decides whether the boundary value itself counts.
    if L <= 1:
        return 0

    nxt = seg + 1
    if nxt >= L - 1:
        # Already on the last segment: clamp to index L-2 once past the final threshold.
        if right_closed:
            if dn >= thresholds_row[L - 1]:
                return L - 2
        else:
            if dn > thresholds_row[L - 1]:
                return L - 2
        return seg

    if right_closed:
        if dn >= thresholds_row[nxt]:
            return seg + 1
    else:
        if dn > thresholds_row[nxt]:
            return seg + 1
    return seg

@njit(cache=True, fastmath=True)
def _calc_rates_inplace(
    counters,
    thresholds_2d,
    rates_2d,
    lens,
    slopes_2d,
    intercepts_2d,
    base_rates,
    seg_idx,
    out_rates,
    use_ceil,
):
    # Compute every group's current hit rate (bp) and write it into out_rates.
    n_groups = counters.shape[0]
    for i in range(n_groups):
        dn = counters[i] + 1  # 1-based number of the upcoming draw
        L = lens[i]
        out_rates[i] = _rate_from_seg(
            dn,
            base_rates[i],
            thresholds_2d[i],
            rates_2d[i],
            slopes_2d[i],
            intercepts_2d[i],
            L,
            seg_idx[i],
            use_ceil,
        )

# Lightweight LCG PRNG (32-bit state); returns the high 16 bits as the uniform value.
@njit(cache=True, fastmath=True)
def _lcg_next(state):
    # Classic LCG constants; uint32 arithmetic wraps modulo 2^32.
    state = (state * np.uint32(1664525) + np.uint32(1013904223))
    return state, state >> np.uint32(16)

@njit(cache=True, fastmath=True)
def simulate_batch_fast(
    n_draws,
    seed32,
    group_priorities,
    base_rates,
    hard_pity_values,
    thresholds_2d,
    rates_2d,
    lens,
    slopes_2d,
    intercepts_2d,
    soft100_thresholds,
    right_closed,
    use_ceil,
    priority_order_asc,
    lowest_idx,
):
    # Main draw loop (in-kernel LCG RNG): supports hard/soft pity, priority
    # competition among simultaneous guarantees, and normalization of the
    # per-group rates so their sum is exactly 10000 bp.
    n_groups = group_priorities.shape[0]
    counters = np.zeros(n_groups, np.int32)
    seg_idx = np.zeros(n_groups, np.int32)
    hits = np.zeros(n_groups, np.int32)
    current_rates = np.empty(n_groups, np.int32)
    # Fall back to a fixed non-zero seed; an all-zero LCG state would be degenerate.
    state = np.uint32(seed32 if seed32 != 0 else 123456789)

    for _ in range(n_draws):
        any_guarantee = False
        best_pri = -2147483648  # INT32_MIN sentinel
        hit_group = 0

        # Check guarantee candidates first; the highest-priority one wins.
        for i in range(n_groups):
            dn = counters[i] + 1  # 1-based number of the draw about to happen
            g = 0
            if hard_pity_values[i] > 0 and dn >= hard_pity_values[i]:
                g = 1
            elif soft100_thresholds[i] > 0 and dn >= soft100_thresholds[i]:
                g = 1
            if g == 1:
                if group_priorities[i] > best_pri:
                    best_pri = group_priorities[i]
                    hit_group = i
                any_guarantee = True

        if not any_guarantee:
            # No guarantee: draw by interpolated rates, corrected to sum to 10000 bp.
            _calc_rates_inplace(
                counters,
                thresholds_2d,
                rates_2d,
                lens,
                slopes_2d,
                intercepts_2d,
                base_rates,
                seg_idx,
                current_rates,
                use_ceil,
            )
            total = 0
            for i in range(n_groups):
                total += current_rates[i]

            if total > 10000:
                # Trim the excess from the lowest-priority groups first.
                excess = total - 10000
                for k in range(priority_order_asc.shape[0]):
                    if excess <= 0:
                        break
                    idx = priority_order_asc[k]
                    red = current_rates[idx] if current_rates[idx] < excess else excess
                    current_rates[idx] -= red
                    excess -= red
            elif total < 10000:
                # Give the shortfall to the lowest-priority group.
                current_rates[lowest_idx] += (10000 - total)

            # Uniform draw in 1..10000.
            # NOTE(review): rv is a 16-bit value, so rv % 10000 carries a slight
            # modulo bias toward low rolls.
            state, rv = _lcg_next(state)
            roll = 1 + int(rv % np.uint32(10000))
            csum = 0
            for i in range(n_groups):
                csum += current_rates[i]
                if roll <= csum:
                    hit_group = i
                    break

        # Record the hit, then advance counters/segment indices of the other groups.
        hits[hit_group] += 1
        for i in range(n_groups):
            if i == hit_group:
                counters[i] = 0
                seg_idx[i] = 0
            else:
                counters[i] += 1
                L = lens[i]
                if L > 1:
                    dn_next = counters[i] + 1
                    seg_idx[i] = _update_seg_idx_after_increment(
                        dn_next, thresholds_2d[i], L, seg_idx[i], right_closed
                    )
    return hits

@njit(cache=True, fastmath=True)
def simulate_batch_with_rolls(
    n_draws,
    random_rolls,
    group_priorities,
    base_rates,
    hard_pity_values,
    thresholds_2d,
    rates_2d,
    lens,
    slopes_2d,
    intercepts_2d,
    soft100_thresholds,
    right_closed,
    use_ceil,
    priority_order_asc,
    lowest_idx,
):
    # Main draw loop fed by externally supplied uint16 rolls (values 1..10000);
    # otherwise identical in structure to simulate_batch_fast.
    n_groups = group_priorities.shape[0]
    counters = np.zeros(n_groups, np.int32)
    seg_idx = np.zeros(n_groups, np.int32)
    hits = np.zeros(n_groups, np.int32)
    current_rates = np.empty(n_groups, np.int32)

    for draw_idx in range(n_draws):
        any_guarantee = False
        best_pri = -2147483648  # INT32_MIN sentinel
        hit_group = 0

        # Guarantee pass: highest-priority guaranteed group wins the draw.
        for i in range(n_groups):
            dn = counters[i] + 1
            g = 0
            if hard_pity_values[i] > 0 and dn >= hard_pity_values[i]:
                g = 1
            elif soft100_thresholds[i] > 0 and dn >= soft100_thresholds[i]:
                g = 1
            if g == 1:
                if group_priorities[i] > best_pri:
                    best_pri = group_priorities[i]
                    hit_group = i
                any_guarantee = True

        if not any_guarantee:
            # Probabilistic pass: interpolate rates, then normalize to 10000 bp.
            _calc_rates_inplace(
                counters,
                thresholds_2d,
                rates_2d,
                lens,
                slopes_2d,
                intercepts_2d,
                base_rates,
                seg_idx,
                current_rates,
                use_ceil,
            )
            total = 0
            for i in range(n_groups):
                total += current_rates[i]

            if total > 10000:
                # Trim the excess from the lowest-priority groups first.
                excess = total - 10000
                for k in range(priority_order_asc.shape[0]):
                    if excess <= 0:
                        break
                    idx = priority_order_asc[k]
                    red = current_rates[idx] if current_rates[idx] < excess else excess
                    current_rates[idx] -= red
                    excess -= red
            elif total < 10000:
                # Give the shortfall to the lowest-priority group.
                current_rates[lowest_idx] += (10000 - total)

            roll = int(random_rolls[draw_idx])
            csum = 0
            for i in range(n_groups):
                csum += current_rates[i]
                if roll <= csum:
                    hit_group = i
                    break

        # Record the hit, then advance counters/segment indices of the other groups.
        hits[hit_group] += 1
        for i in range(n_groups):
            if i == hit_group:
                counters[i] = 0
                seg_idx[i] = 0
            else:
                counters[i] += 1
                L = lens[i]
                if L > 1:
                    dn_next = counters[i] + 1
                    seg_idx[i] = _update_seg_idx_after_increment(
                        dn_next, thresholds_2d[i], L, seg_idx[i], right_closed
                    )
    return hits

# -----------------------------
# Python-side wrapper
# -----------------------------

class OptimizedGachaSimulator:
    """Converts Group definitions into the SoA arrays the numba kernels need
    and exposes a single-batch simulation entry point."""

    def __init__(
        self,
        groups: List[Group],
        seed: Optional[int] = None,
        defer_on_ties: bool = False,
        right_closed: bool = False,
        interp: str = "round",
    ):
        """
        Args:
            groups: reward group definitions (keyed internally by group_id).
            seed: seed for the Python-side RNG that derives kernel seeds.
            defer_on_ties: reserved flag; not read by the kernels in this file.
            right_closed: treat threshold intervals as closed on the right
                instead of the default half-open behavior.
            interp: rounding mode for interpolated rates, "round" or "ceil".
        """
        self.groups: Dict[int, Group] = {g.group_id: g for g in groups}
        self.rng = random.Random(seed)
        self.defer_on_ties = defer_on_ties
        self.deferred_queue: List[int] = []
        self.right_closed = right_closed
        self.interp = interp
        self._compile_numba_data()

    def _compile_numba_data(self):
        """Convert the struct-of-objects group list into struct-of-arrays form."""
        # Sort so that array index order follows group_id deterministically.
        sorted_groups = sorted(self.groups.values(), key=lambda g: g.group_id)

        self.group_ids = np.asarray([g.group_id for g in sorted_groups], dtype=np.int32)
        self.group_priorities = np.asarray([g.priority for g in sorted_groups], dtype=np.int32)
        self.base_rates = np.asarray([g.base_rate_bp for g in sorted_groups], dtype=np.int32)
        self.hard_pity_values = np.asarray([g.hard_pity for g in sorted_groups], dtype=np.int32)

        n = len(sorted_groups)
        max_steps = max((len(g.soft_steps) for g in sorted_groups), default=0)

        # Padded rectangular arrays: unused threshold slots hold -1, rates 0.
        thresholds_2d = np.full((n, max_steps), -1, dtype=np.int32)
        rates_2d = np.zeros((n, max_steps), dtype=np.int32)
        lens = np.zeros(n, dtype=np.int32)

        for i, g in enumerate(sorted_groups):
            steps = sorted(g.soft_steps, key=lambda s: s.threshold)
            L = len(steps)
            lens[i] = L
            for j in range(L):
                thresholds_2d[i, j] = steps[j].threshold
                rates_2d[i, j] = steps[j].rate_bp

        slopes_2d, intercepts_2d, soft100_thresholds = compile_soft_steps_arrays_numba(
            thresholds_2d, rates_2d, lens
        )

        self.thresholds_2d = thresholds_2d
        self.rates_2d = rates_2d
        self.lens = lens
        self.slopes_2d = slopes_2d
        self.intercepts_2d = intercepts_2d
        self.soft100_thresholds = soft100_thresholds

        # Precomputed helpers: indices in ascending priority order, and the
        # index of the lowest-priority group (receives normalization shortfall).
        self.priority_order_asc = np.argsort(self.group_priorities).astype(np.int32)
        self.lowest_idx = int(self.priority_order_asc[0])

    def simulate_draws_optimized(self, n: int, rng_mode: str = "lcg") -> Dict[str, Any]:
        """Run n draws; return {"hits": {name: count}} ordered by descending priority."""
        seed32 = np.int32(self.rng.randint(1, 2**31 - 1))

        if rng_mode == "vector":
            from numpy.random import PCG64, Generator

            gen = Generator(PCG64(int(seed32)))
            # Pre-generate rolls in 1..10000 as uint16 to keep memory small.
            random_rolls = gen.integers(1, 10001, size=n, dtype=np.uint16)
            hits_array = simulate_batch_with_rolls(
                n,
                random_rolls,
                self.group_priorities,
                self.base_rates,
                self.hard_pity_values,
                self.thresholds_2d,
                self.rates_2d,
                self.lens,
                self.slopes_2d,
                self.intercepts_2d,
                self.soft100_thresholds,
                self.right_closed,
                self.interp == "ceil",
                self.priority_order_asc,
                self.lowest_idx,
            )
        else:
            # Default LCG: random numbers generated inside the kernel, avoiding
            # a large pre-generated roll array.
            hits_array = simulate_batch_fast(
                n,
                seed32,
                self.group_priorities,
                self.base_rates,
                self.hard_pity_values,
                self.thresholds_2d,
                self.rates_2d,
                self.lens,
                self.slopes_2d,
                self.intercepts_2d,
                self.soft100_thresholds,
                self.right_closed,
                self.interp == "ceil",
                self.priority_order_asc,
                self.lowest_idx,
            )

        # NOTE(review): result keys are group names — assumes names are unique
        # across groups (true for DEFAULT_GROUPS).
        id2name = {g.group_id: g.name for g in self.groups.values()}
        hits_named = {
            id2name[gid]: int(hits_array[i]) for i, gid in enumerate(self.group_ids)
        }
        ordered_ids = [
            g.group_id
            for g in sorted(self.groups.values(), key=lambda g: g.priority, reverse=True)
        ]
        hits_ordered = {id2name[gid]: hits_named.get(id2name[gid], 0) for gid in ordered_ids}
        return {"hits": hits_ordered}

# -----------------------------
# Defaults
# -----------------------------

# Default group table: Group(group_id, name, base_rate_bp, hard_pity, priority, soft_steps).
# Rates are basis points; the four base rates sum to exactly 10000 bp (100%).
DEFAULT_GROUPS = [
    Group(50350, "传说", 100, 70, 4, [SoftStep(0, 100), SoftStep(40, 100), SoftStep(69, 2710), SoftStep(70, 10000)]),
    Group(50368, "史诗", 700, 30, 3, [SoftStep(0, 700), SoftStep(14, 700), SoftStep(29, 1000), SoftStep(30, 10000)]),
    Group(50386, "稀有", 2200, 10, 2, [SoftStep(0, 2200), SoftStep(4, 2200), SoftStep(9, 2400), SoftStep(10, 10000)]),
    Group(52011, "其他/碎片", 7000, 0, 1, []),
]

# -----------------------------
# Multiprocessing helpers
# -----------------------------

def _run_chunk_optimized(args):
    """Worker entry point: simulate one chunk and return its hit-count dict.

    `args` is the tuple (pulls, seed, defer, right_closed, interp, rng_mode).
    """
    # Windows uses spawn, so freshly started workers must re-cap the numeric
    # library thread counts themselves.
    os.environ.setdefault("NUMBA_NUM_THREADS", "1")
    os.environ.setdefault("OMP_NUM_THREADS", "1")
    os.environ.setdefault("MKL_NUM_THREADS", "1")
    os.environ.setdefault("OPENBLAS_NUM_THREADS", "1")

    pulls, seed, defer, right_closed, interp, rng_mode = args
    sim = OptimizedGachaSimulator(
        DEFAULT_GROUPS,
        seed=seed,
        defer_on_ties=defer,
        right_closed=right_closed,
        interp=interp,
    )
    res = sim.simulate_draws_optimized(pulls, rng_mode=rng_mode)
    return res["hits"]

def _merge_hits(results: List[Dict[str, int]]) -> Dict[str, int]:
    # 合并每个分片的命中统计
    if not results:
        return {}
    all_keys = set()
    for r in results:
        all_keys.update(r.keys())
    merged = {k: 0 for k in all_keys}
    for r in results:
        for k, v in r.items():
            merged[k] += v
    return merged

def _format_time(seconds: float) -> str:
    # 友好时间显示：SS / MM:SS / H:MM:SS
    if seconds < 60:
        return f"{seconds:.1f}s"
    m, s = divmod(int(seconds), 60)
    if m < 60:
        return f"{m:02d}:{s:02d}"
    h, m = divmod(m, 60)
    return f"{h:d}:{m:02d}:{s:02d}"

def _print_progress(completed: int, total: int, start_ts: float, last_update: float):
    # 进度条：~0.1s 节流，避免频繁刷新
    now = time.perf_counter()
    if (now - last_update) < 0.1 and completed != total:
        return last_update
    width = 30
    frac = completed / total if total else 1.0
    filled = int(frac * width)
    bar = "█" * filled + "·" * (width - filled)
    elapsed = now - start_ts
    eta = (elapsed / completed * (total - completed)) if completed else 0.0
    print(
        f"\r进度 |{bar}| {completed}/{total}  {frac*100:5.1f}%  "
        f"耗时 { _format_time(elapsed) }  预计剩余 { _format_time(eta) }",
        end="",
        flush=True,
    )
    return now

# Auto full-core: workers defaults to os.cpu_count(); large jobs split into ~workers*8 chunks.
def run_optimized_simulation(
    pulls: int,
    seed: Optional[int] = None,
    defer: bool = False,
    right_closed: bool = False,
    interp: str = "round",
    workers: Optional[int] = None,
    chunk_size: Optional[int] = None,
    show_progress: bool = False,
    rng_mode: str = "lcg",
) -> Dict[str, int]:
    """Run `pulls` draws across a process pool and return merged hit counts.

    Args:
        pulls: total number of draws; <= 0 returns {}.
        seed: base seed; per-chunk seeds are derived via SeedSequence.spawn.
        defer, right_closed, interp, rng_mode: forwarded to OptimizedGachaSimulator.
        workers: worker process count (default: os.cpu_count()).
        chunk_size: optional upper bound on draws per chunk.
        show_progress: render a throttled progress bar while chunks complete.

    Returns:
        {group_name: total_hit_count} merged across all chunks.
    """
    import multiprocessing as mp
    from concurrent.futures import ProcessPoolExecutor, as_completed

    if pulls <= 0:
        return {}

    if workers is None:
        workers = max(1, os.cpu_count() or 1)

    total = int(pulls)

    # Adaptive chunking: small jobs run as a single chunk; large jobs target
    # ~workers*8 chunks with at least 10k draws each.
    if total < 100_000:
        chunk = total
        num_chunks = 1
    else:
        target_chunks = max(workers * 8, 1)
        base = max(total // target_chunks, 10_000)
        if chunk_size is not None:
            base = min(base, int(chunk_size))
        chunk = base
        num_chunks = (total + chunk - 1) // chunk

    # Child seeds: independent random streams via SeedSequence.spawn.
    base_seed = seed if seed is not None else random.randrange(1 << 30)
    ss = np.random.SeedSequence(base_seed)
    child_ss = ss.spawn(num_chunks)
    seeds = [
        int(np.random.Generator(np.random.PCG64(s)).integers(0, 2**31 - 1))
        for s in child_ss
    ]

    tasks = []
    for i in range(num_chunks):
        # Last chunk absorbs the remainder so chunk sizes sum to `total`.
        pulls_i = chunk if i < num_chunks - 1 else (total - chunk * (num_chunks - 1))
        tasks.append((pulls_i, seeds[i], defer, right_closed, interp, rng_mode))

    # Windows uses spawn; warm up the numba cache in this process with a tiny
    # run before submitting real work.
    try:
        ctx = mp.get_context("spawn")
    except Exception:
        ctx = mp.get_context()

    try:
        _ = OptimizedGachaSimulator(
            DEFAULT_GROUPS,
            seed=(seed or 1),
            defer_on_ties=defer,
            right_closed=right_closed,
            interp=interp,
        ).simulate_draws_optimized(1, rng_mode=rng_mode)
    except Exception:
        pass

    results: List[Dict[str, int]] = []

    if num_chunks == 1 and workers == 1:
        # Tiny job: run in-process to skip pool startup overhead.
        results = [_run_chunk_optimized(tasks[0])]  # type: ignore
    else:
        with ProcessPoolExecutor(max_workers=workers, mp_context=ctx) as ex:
            if show_progress:
                futures = [ex.submit(_run_chunk_optimized, t) for t in tasks]
                start_ts = time.perf_counter()
                completed = 0
                last_update = start_ts
                _print_progress(0, num_chunks, start_ts, last_update)
                for fut in as_completed(futures):
                    try:
                        results.append(fut.result())
                    except Exception:
                        # NOTE(review): a failed chunk is silently counted as
                        # empty; consider logging the exception.
                        results.append({})
                    completed += 1
                    last_update = _print_progress(completed, num_chunks, start_ts, last_update)
                print()
            else:
                results = list(ex.map(_run_chunk_optimized, tasks, chunksize=1))

    return _merge_hits(results)

# -----------------------------
# CLI
# -----------------------------

if __name__ == "__main__":
    import multiprocessing as mp

    mp.freeze_support()  # compatibility with Windows executables / interactive environments

    parser = argparse.ArgumentParser(description="Gacha 模拟器（Windows Fullspeed 版）")
    parser.add_argument("pulls", type=int, help="总抽数（必填）")
    parser.add_argument("--seed", type=int, default=None, help="基础随机种子（可选）")
    parser.add_argument("--json", action="store_true", help="以 JSON 格式输出结果")
    parser.add_argument("--defer", action="store_true", help="启用并发保底延迟（预留功能）")
    parser.add_argument("--right-closed", action="store_true", help="阈值区间采用左闭右闭；默认左闭右开")
    parser.add_argument("--interp", choices=["round", "ceil"], default="round", help="插值取整方式：round=四舍五入，ceil=向上取整")
    parser.add_argument("--workers", type=int, default=None, help="工作进程数（默认自动：os.cpu_count()）")
    parser.add_argument("--chunk-size", type=int, default=None, help="每分片的最大抽数上限（可选）")
    parser.add_argument("--benchmark", action="store_true", help="显示性能指标")
    parser.add_argument("--no-progress", action="store_true", help="禁用进度条")
    parser.add_argument("--quiet", "-q", action="store_true", help="安静模式：最少输出（抑制横幅与进度）")
    parser.add_argument("--rng", choices=["lcg", "vector"], default="lcg", help="随机模式：lcg（内核 LCG）或 vector（PCG64+uint16 预生成）")
    args = parser.parse_args()

    # Progress bar is suppressed in quiet/JSON modes or when explicitly disabled.
    show_progress = (not args.no_progress) and (not args.quiet) and (not args.json)

    if not args.quiet and not args.json:
        print(
            f"优化版Gacha模拟器 - 模拟 {args.pulls:,} 次抽取  | RNG: {args.rng} | "
            f"workers: {args.workers or os.cpu_count()}"
        )
        if show_progress:
            print()

    start = time.perf_counter()

    agg = run_optimized_simulation(
        pulls=args.pulls,
        seed=args.seed,
        defer=args.defer,
        right_closed=args.right_closed,
        interp=args.interp,
        workers=args.workers,
        chunk_size=args.chunk_size,
        show_progress=show_progress,
        rng_mode=args.rng,
    )

    end = time.perf_counter()

    if args.benchmark and not args.quiet:
        elapsed = end - start
        pulls_per_sec = args.pulls / elapsed if elapsed > 0 else 0.0
        workers_used = args.workers or (os.cpu_count() or 1)
        efficiency = pulls_per_sec / workers_used if workers_used > 0 else 0.0

        print(f"\n=== 性能指标 ===")
        print(f"执行时间: {elapsed:.3f}秒")
        print(f"每秒模拟: {pulls_per_sec:,.0f} 抽")
        print(f"工作进程: {workers_used}")
        print(f"每进程效率: {efficiency:,.0f} 抽/秒/进程")
        # Estimate of the uint16 roll array's footprint (only allocated in vector mode).
        mem_peak_est = (args.pulls * (2 if args.rng == 'vector' else 0) / 1024 / 1024)  # uint16 随机数组（仅 vector 模式）
        print(f"内存效率: ~{mem_peak_est:.1f} MB 额外随机数内存开销")
        print(f"总计算量: {args.pulls:,} 次抽取模拟")
        print("=" * 40)

    if args.json:
        print(json.dumps(agg, ensure_ascii=False, indent=2))
    else:
        if not args.quiet:
            print(f"\n=== 模拟结果 ===")
            total_hits = sum(agg.values())
            print("命中统计:")
            for item, count in agg.items():
                percentage = (count / total_hits * 100) if total_hits > 0 else 0.0
                print(f"  {item}: {count:,} ({percentage:.2f}%)")
        else:
            print("命中统计:", agg)
