#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import argparse
import sys
import time
import os, json
from typing import List, Dict

from filters import FilterRegistry
from config import RAW_DATA_DIR, PROCESSED_DATA_DIR, FILTER_CONFIG
from utils.utils import setup_logging, save_json, load_json, save_jsonl, append_jsonl_line
from crawler import (
    fetch_pull_requests, extract_pr_info,
    fetch_pr_commits, fetch_pr_files, extract_commit_messages, extract_file_diffs
)
from processors import UnitSplitter, UnitFormatter


class PRDataConstructor:
    """PR data constructor: streaming crawl + unit-processing pipeline.

    Downloads pull-request data via the crawler layer, streams each PR
    through an optional callback, splits PRs into formatted "units", and
    writes accepted and rejected units to timestamped output files.
    """

    def __init__(self, filter_config=None, enable_stats=True):
        """
        Args:
            filter_config: mapping describing filter rules; defaults to the
                project-level FILTER_CONFIG when None.
            enable_stats: when True, the unit splitter tracks per-filter
                statistics.
        """
        self.logger = setup_logging("PRDataConstructor")
        self.enable_stats = enable_stats
        # One timestamp per run so raw and processed artifacts correlate.
        self.run_ts = time.strftime('%Y%m%d_%H%M%S')

        # A plain name reference can never raise ImportError, so the
        # original try/except around this assignment was dead code.
        if filter_config is None:
            filter_config = FILTER_CONFIG

        try:
            filter_rules = FilterRegistry.create_filters_from_mapping(filter_config)
        except ImportError:
            # Best-effort: run with no filtering if rule creation needs an
            # unavailable optional dependency.
            filter_rules = []

        self.unit_splitter = UnitSplitter(filter_rules=filter_rules, enable_stats=enable_stats)
        self.formatter = UnitFormatter()

    # ---------------- path helpers ----------------
    def _raw_jsonl_path(self, state: str) -> str:
        """Per-PR raw JSONL path for this run."""
        return f"{RAW_DATA_DIR}/prs_{state}_{self.run_ts}.jsonl"

    def _raw_json_path(self, state: str) -> str:
        """Aggregated raw JSON path for this run."""
        return f"{RAW_DATA_DIR}/prs_{state}_{self.run_ts}.json"

    def _processed_units_path(self, state: str) -> str:
        """Processed-units JSONL path for this run."""
        return f"{PROCESSED_DATA_DIR}/units_{state}_{self.run_ts}.jsonl"

    # ---------------- filter statistics ----------------
    def get_filter_stats(self):
        """Return summary filter stats, or {} when stats are disabled."""
        return self.unit_splitter.get_filter_stats() if self.enable_stats else {}

    def get_detailed_filter_stats(self):
        """Return detailed filter stats, or {} when stats are disabled."""
        return self.unit_splitter.get_detailed_filter_stats() if self.enable_stats else {}

    def print_filter_stats(self):
        """Print the splitter's filter statistics (logs a notice if disabled)."""
        if self.enable_stats:
            self.unit_splitter.print_filter_stats()
        else:
            self.logger.info("筛选统计未启用")

    def reset_filter_stats(self):
        """Reset the splitter's filter statistics when stats are enabled."""
        if self.enable_stats:
            self.unit_splitter.reset_filter_stats()
            self.logger.info("筛选统计已重置")

    # ---------------- core streaming fetch ----------------
    def stream_pr_data(self, state="open", per_page=100, max_pages=None,
                       pr_callback=None) -> List[Dict]:
        """Fetch PR data and invoke an optional callback per PR.

        Args:
            state: PR state filter passed to the crawler.
            per_page: page size for the listing API.
            max_pages: maximum pages to fetch; None means all.
            pr_callback: callable invoked with each assembled
                {"pr_info": ..., "diffs": ...} record; its exceptions are
                logged and swallowed so one bad PR does not stop the crawl.

        Returns:
            List of all assembled PR records.
        """
        self.logger.info(f"开始抓取PR数据，状态={state}")
        prs = fetch_pull_requests(state=state, per_page=per_page, max_pages=max_pages)
        self.logger.info(f"共获取 {len(prs)} 个PR")

        all_pr_data = []
        for i, pr in enumerate(prs):
            self.logger.info(f"处理PR {i+1}/{len(prs)}: #{pr['number']}")
            pr_info = extract_pr_info(pr)
            commits = fetch_pr_commits(pr_info["number"])
            pr_info["commit_messages"] = extract_commit_messages(commits)
            files = fetch_pr_files(pr_info["number"])
            diffs = extract_file_diffs(files)
            pr_data = {"pr_info": pr_info, "diffs": diffs}

            all_pr_data.append(pr_data)
            if pr_callback:
                try:
                    pr_callback(pr_data)
                except Exception as e:
                    self.logger.warning(f"回调处理PR失败: {e}")

        return all_pr_data

    # ---------------- download mode ----------------
    def download_pr_data(self, state="open", per_page: int = 100, max_pages: int = None) -> str:
        """Download PRs, appending each to JSONL and saving a final JSON dump.

        Returns:
            Path of the raw JSONL file written.
        """
        raw_jsonl = self._raw_jsonl_path(state)
        raw_json = self._raw_json_path(state)

        os.makedirs(RAW_DATA_DIR, exist_ok=True)
        pr_list = []

        def callback(pr_data):
            # Stream to JSONL immediately so a crash loses at most one PR.
            append_jsonl_line(pr_data, raw_jsonl, ensure_ascii=False)
            pr_list.append(pr_data)

        self.stream_pr_data(state=state, per_page=per_page, max_pages=max_pages, pr_callback=callback)
        save_json(pr_list, raw_json)
        self.logger.info(f"下载完成: JSONL={raw_jsonl}, JSON={raw_json}")
        return raw_jsonl

    # ---------------- process mode ----------------
    def _load_pr_list(self, input_file):
        """Load PR records from a .jsonl file (one JSON object per line,
        blank lines tolerated) or from a plain .json file."""
        if input_file.endswith(".jsonl"):
            records = []
            with open(input_file, "r", encoding="utf-8") as f:
                for line in f:
                    line = line.strip()
                    if line:  # skip blank/trailing lines instead of crashing
                        records.append(json.loads(line))
            return records
        return load_json(input_file)

    def process_raw_data(self, input_file, output_file=None, rejected_file=None):
        """Process a raw data file (.jsonl or .json) into unit files.

        Valid units are streamed to `output_file` (JSONL) and dumped as JSON
        at the end; rejected/invalid units, tagged with a `reason`, go to
        `rejected_file` the same way.
        """
        self.logger.info(f"开始处理raw数据文件: {input_file}")
        pr_list = self._load_pr_list(input_file)
        self.logger.info(f"加载了 {len(pr_list)} 个PR数据")

        if output_file is None:
            base = os.path.splitext(os.path.basename(input_file))[0]
            output_file = f"{PROCESSED_DATA_DIR}/units_{base}.jsonl"

        if rejected_file is None:
            rejected_file = output_file.replace(".jsonl", "_rejected.jsonl")

        # Ensure output directories exist before streaming appends —
        # download mode creates RAW_DATA_DIR but nothing created
        # PROCESSED_DATA_DIR in the original code.
        for path in (output_file, rejected_file):
            if path:
                parent = os.path.dirname(path)
                if parent:
                    os.makedirs(parent, exist_ok=True)

        all_units = []
        all_rejected = []

        for pr_data in pr_list:
            units, rejected = self.unit_splitter.split_pr_into_units(pr_data)
            valid_units = self.formatter.filter_valid_units(units)

            # Use identity, not equality, to find formatter-rejected units:
            # the original `u not in valid_units` was O(n^2) per PR and
            # could wrongly match a distinct-but-equal unit dict.
            valid_ids = {id(u) for u in valid_units}
            invalid_units = [u for u in units if id(u) not in valid_ids]

            # Tag rejection reasons.
            for u in rejected:
                u.setdefault("reason", "filtered_by_rules")
            for u in invalid_units:
                u["reason"] = "invalid_format"

            all_units.extend(valid_units)
            all_rejected.extend(rejected + invalid_units)

            # Stream results to disk as each PR is processed.
            for u in valid_units:
                append_jsonl_line(u, output_file)
            if rejected_file:
                for u in rejected + invalid_units:
                    append_jsonl_line(u, rejected_file)

        # Final aggregated JSON dumps.
        save_json(all_units, output_file.replace(".jsonl", ".json"))
        if rejected_file:
            save_json(all_rejected, rejected_file.replace(".jsonl", ".json"))

        self.logger.info(f"处理完成: {len(all_units)} 个有效unit, {len(all_rejected)} 个被筛除unit")

    # ---------------- deal mode ----------------
    def deal(self, state="open", per_page: int = 100, max_pages: int = None, output_file=None, rejected_file=None):
        """Download then process in one pass."""
        temp_jsonl = self.download_pr_data(state, per_page=per_page, max_pages=max_pages)

        if output_file is None:
            base = os.path.splitext(os.path.basename(temp_jsonl))[0]
            output_file = f"{PROCESSED_DATA_DIR}/units_{base}.jsonl"

        if rejected_file is None:
            rejected_file = output_file.replace(".jsonl", "_rejected.jsonl")

        self.process_raw_data(temp_jsonl, output_file, rejected_file)


# ---------------- 命令行入口 ----------------
def main():
    """CLI entry point: download, process, or download-and-process PR data.

    Exits 0 on success, 1 on failure (or on missing --input in process mode).
    """
    parser = argparse.ArgumentParser(description="PR数据构建器")
    parser.add_argument('mode', choices=['download', 'process', 'deal'],
                        help='运行模式: download(仅下载), process(仅处理), deal(下载并处理)')
    parser.add_argument('--state', type=str, default="open",
                        help='PR状态(open/closed/all/merged)')
    parser.add_argument('--per_page', type=int, default=100,
                        help='每页数量（最大100）')
    parser.add_argument('--max_pages', type=int, default=0,
                        help='最多抓取页数，0表示全部')
    parser.add_argument('--output', type=str, default=None,
                        help='仅在 process 模式使用：输出目录或文件路径（建议目录）')
    parser.add_argument('--rejected', type=str, default=None,
                        help='仅在 process 模式使用：被筛除units保存目录（可选）')
    parser.add_argument('--input', type=str, default=None,
                        help='仅在 process 模式使用：输入文件或目录（必填）')

    args = parser.parse_args()
    constructor = PRDataConstructor()

    # max_pages=0 (or unset) means "fetch all pages". The original duplicated
    # this normalization in two branches with a redundant `== 0` check.
    effective_max_pages = args.max_pages or None

    try:
        if args.mode in ("download", "deal"):
            # --input/--output/--rejected only apply to process mode; warn
            # and ignore them here (message text identical to the original).
            if args.input or args.output or args.rejected:
                constructor.logger.warning(f"{args.mode} 模式不需要 --input/--output/--rejected，已忽略这些参数")

            if args.mode == "download":
                constructor.download_pr_data(state=args.state, per_page=args.per_page,
                                             max_pages=effective_max_pages)
                constructor.logger.info("下载成功完成！")
            else:
                constructor.deal(
                    state=args.state,
                    per_page=args.per_page,
                    max_pages=effective_max_pages,
                    output_file=None,
                    rejected_file=None
                )
                # Print filter summary after the combined run.
                constructor.print_filter_stats()
                constructor.logger.info("下载并处理成功完成！")
        else:  # process
            if not args.input:
                constructor.logger.error("process 模式必须指定 --input 文件路径（支持 .jsonl 或 .json）")
                sys.exit(1)
            constructor.process_raw_data(
                input_file=args.input,
                output_file=args.output,
                rejected_file=args.rejected
            )
            # Print filter summary after processing.
            constructor.print_filter_stats()
            constructor.logger.info("处理成功完成！")
        sys.exit(0)
    except Exception as e:
        # SystemExit is not an Exception subclass, so the sys.exit calls
        # above pass through untouched.
        constructor.logger.exception(f"处理失败: {e}")
        sys.exit(1)


# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
