#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
对 data/train_processed.json 进行文本长度统计，结果保存为 data/statistical_information.txt
- 统计指标：最小值、最大值、均值、中位数、四分位数(Q1、Q3)、95 分位数、99 分位数
- 分批处理：不一次性加载全部数据，使用外部排序(k-way merge)精确计算分位数
- 输入格式：train_processed.json 为 JSON 数组，每行一个对象（中间以逗号分隔，首尾有方括号）
- 对象结构：{"fact": str, "accusation": [...], "categories": [...]}，仅使用 fact 计算长度

可选参数：
--input, --output, --chunk-size, --tmp-dir, --max-rows
"""

import argparse
import heapq
import json
import math
import os
import sys
import tempfile
from typing import Iterable, List, Tuple, Dict


def iter_json_array_objects(path: str, max_rows: int | None = None) -> Iterable[Dict]:
    """按行增量读取 JSON 数组文件中的对象（每行一个对象，首尾为 [ 与 ]）。"""
    count = 0
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            s = line.strip()
            if not s:
                continue
            if s == '[' or s == ']':
                continue
            # 去掉行尾逗号
            if s.endswith(','):
                s = s[:-1].rstrip()
            if not s:
                continue
            obj = json.loads(s)
            yield obj
            count += 1
            if max_rows is not None and count >= max_rows:
                return


def write_sorted_chunk(lengths: List[int], tmp_dir: str) -> str:
    """Sort *lengths* in place and persist them, one integer per line.

    A temporary file is created inside *tmp_dir*; its path is returned so the
    caller can merge and later delete it. Note the input list is mutated
    (sorted ascending).
    """
    lengths.sort()
    fd, path = tempfile.mkstemp(prefix='len_chunk_', suffix='.txt', dir=tmp_dir, text=True)
    # os.fdopen takes ownership of the descriptor and closes it on exit.
    with os.fdopen(fd, 'w', encoding='utf-8') as out:
        out.writelines(f'{value}\n' for value in lengths)
    return path


def stream_sorted_file(path: str) -> Iterable[int]:
    """Yield the integers stored one per line in *path*, skipping blank lines."""
    with open(path, 'r', encoding='utf-8') as fh:
        yield from (int(text) for raw in fh if (text := raw.strip()))


def k_way_merge(files: List[str]) -> Iterable[int]:
    """Merge several already-sorted length files, yielding values ascending.

    Delegates the heap bookkeeping to ``heapq.merge``, which performs exactly
    the same k-way merge: pull the smallest head among all streams, then
    refill from the stream it came from.
    """
    def read_ints(path: str) -> Iterable[int]:
        # One-per-line integer reader; blank lines are ignored.
        with open(path, 'r', encoding='utf-8') as fh:
            for raw in fh:
                text = raw.strip()
                if text:
                    yield int(text)

    yield from heapq.merge(*(read_ints(p) for p in files))


def nearest_rank_index(p: float, n: int) -> int:
    """Return the nearest-rank 1-based index for percentile *p* over *n* items.

    The raw rank ``ceil(p * n)`` is clamped into [1, n]; for a non-positive
    *n* the function falls back to 1.

    Example: p=0.25, n=100 -> 25.
    """
    if n <= 0:
        return 1
    return min(max(math.ceil(p * n), 1), n)


def compute_statistics(
    input_json: str,
    output_txt: str,
    chunk_size: int = 300_000,
    tmp_dir: str | None = None,
    max_rows: int | None = None,
) -> None:
    """Compute length statistics of the ``fact`` field and write them to a file.

    Lengths are streamed from *input_json*, spilled to disk as sorted chunk
    files, and then merged (k-way) so exact nearest-rank percentiles can be
    computed without loading the whole dataset into memory.

    Args:
        input_json: Path to the JSON-array input file (one object per line).
        output_txt: Path of the statistics report to write.
        chunk_size: Number of lengths buffered before spilling a sorted chunk.
        tmp_dir: Directory for temporary chunk files; defaults to the output
            file's directory (or the CWD if the output path has no directory).
        max_rows: Optional cap on rows processed (debugging aid).
    """
    out_dir = os.path.dirname(output_txt)
    # BUG FIX: os.makedirs('') raises FileNotFoundError when output_txt has no
    # directory component; only create a directory when there is one.
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    if tmp_dir is None:
        tmp_dir = out_dir or '.'
    os.makedirs(tmp_dir, exist_ok=True)

    total = 0
    sum_len = 0
    min_len: int | None = None
    max_len: int | None = None

    chunk: List[int] = []
    tmp_files: List[str] = []

    # Pass 1: accumulate basic stats and spill sorted chunk files.
    for obj in iter_json_array_objects(input_json, max_rows=max_rows):
        fact = obj.get('fact', '')
        if not isinstance(fact, str):
            # Ignore malformed rows.
            continue
        L = len(fact)
        total += 1
        sum_len += L
        min_len = L if min_len is None else min(min_len, L)
        max_len = L if max_len is None else max(max_len, L)

        chunk.append(L)
        if len(chunk) >= chunk_size:
            path = write_sorted_chunk(chunk, tmp_dir)
            tmp_files.append(path)
            chunk = []
            print(f"写出分块文件: {path}", file=sys.stderr)

    # Flush the final partial chunk.
    if chunk:
        path = write_sorted_chunk(chunk, tmp_dir)
        tmp_files.append(path)
        chunk = []
        print(f"写出分块文件: {path}", file=sys.stderr)

    if total == 0:
        with open(output_txt, 'w', encoding='utf-8') as w:
            w.write('没有可统计的数据\n')
        return

    mean_len = sum_len / total

    # Percentiles to report (label -> fraction).
    percentiles = {
        'Q1(25%)': 0.25,
        '中位数(50%)': 0.50,
        'Q3(75%)': 0.75,
        'P95(95%)': 0.95,
        'P99(99%)': 0.99,
    }
    # (rank, label) pairs sorted ascending so one sorted pass collects them all.
    targets: List[Tuple[int, str]] = sorted(
        (nearest_rank_index(p, total), label) for label, p in percentiles.items()
    )

    # A single chunk is already globally sorted; otherwise k-way merge.
    ordered = (
        stream_sorted_file(tmp_files[0]) if len(tmp_files) == 1 else k_way_merge(tmp_files)
    )
    results = _select_ranks(ordered, targets)

    # Write the report.
    with open(output_txt, 'w', encoding='utf-8') as w:
        w.write('文本长度统计（单位：字符数）\n')
        w.write('—— 计算方法：分块外部排序 + 最近秩(Nearest Rank)分位数\n')
        w.write(f'总样本数: {total}\n')
        w.write(f'最小值: {min_len}\n')
        # BUG FIX: the maximum was hard-coded as '14,606'; report the computed value.
        w.write(f'最大值: {max_len}\n')
        w.write(f'均值: {mean_len:.3f}\n')
        # Fixed output order, independent of dict/result ordering.
        for label in ['Q1(25%)', '中位数(50%)', 'Q3(75%)', 'P95(95%)', 'P99(99%)']:
            val = results.get(label, 'N/A')
            w.write(f'{label}: {val}\n')

    # Best-effort cleanup of the temporary chunk files.
    for p in tmp_files:
        try:
            os.remove(p)
        except OSError:
            pass

    print(f"统计已完成，输出文件: {output_txt}")


def _select_ranks(values: Iterable[int], targets: List[Tuple[int, str]]) -> Dict[str, int]:
    """Walk a sorted value stream and pick the value at each (rank, label) target.

    *targets* must be sorted ascending by 1-based rank; targets sharing a rank
    all receive the same value. Iteration stops as soon as every target is
    filled, so the stream is not necessarily exhausted.
    """
    results: Dict[str, int] = {}
    idx = 0
    current_rank = 0
    for v in values:
        current_rank += 1
        while idx < len(targets) and current_rank >= targets[idx][0]:
            results[targets[idx][1]] = v
            idx += 1
        if idx >= len(targets):
            break
    return results


def main() -> None:
    """CLI entry point: parse arguments and run the batch length statistics."""
    default_input = r'D:\LearnPython\crime-type-discrimination\data\train_processed.json'
    default_output = r'D:\LearnPython\crime-type-discrimination\data\statistical_information.txt'
    default_tmp = r'D:\LearnPython\crime-type-discrimination\data'

    parser = argparse.ArgumentParser(description='对文本长度进行分批统计（精确分位数）')
    # Table-driven flag registration; order matters for --help output.
    flag_specs = (
        ('--input', dict(type=str, default=default_input, help='输入 JSON 数组文件路径')),
        ('--output', dict(type=str, default=default_output, help='输出统计文本路径')),
        ('--chunk-size', dict(type=int, default=300_000, help='分块大小（条）')),
        ('--tmp-dir', dict(type=str, default=default_tmp, help='临时文件目录')),
        ('--max-rows', dict(type=int, default=None, help='仅处理前N条（调试用）')),
    )
    for flag, kwargs in flag_specs:
        parser.add_argument(flag, **kwargs)

    opts = parser.parse_args()

    compute_statistics(
        input_json=opts.input,
        output_txt=opts.output,
        chunk_size=opts.chunk_size,
        tmp_dir=opts.tmp_dir,
        max_rows=opts.max_rows,
    )


if __name__ == '__main__':
    main()