import json
import os
from typing import Dict, Any
from config import CRAWL_BASE, CNKI_BASE, OUTPUT_BASE


def load_json_file(file_path: str) -> Dict[str, Any]:
    """
    Load a JSON file and return its parsed contents.

    Returns an empty dict (after printing a diagnostic) when the file is
    missing, unreadable, or contains invalid JSON, so callers can proceed
    best-effort.
    """
    try:
        with open(file_path, "r", encoding="utf-8") as f:
            return json.load(f)
    # OSError covers missing/unreadable files; ValueError covers both
    # json.JSONDecodeError and UnicodeDecodeError. This is narrower than
    # the original bare `except Exception`, so genuine programming errors
    # (e.g. TypeError) are no longer silently swallowed.
    except (OSError, ValueError) as e:
        print(f"Error loading {file_path}: {str(e)}")
        return {}


def normalize_title(title: str) -> str:
    """
    Normalize a title for matching: strip surrounding whitespace and
    lowercase.

    NOTE: the original docstring claimed punctuation was also removed,
    but no such stripping is performed — titles differing only in
    punctuation will NOT match.
    """
    return title.strip().lower()


def merge_json_data(
    crawl_data: Dict[str, Any], cnki_data: Dict[str, Any]
) -> Dict[str, Any]:
    """
    Merge crawl data with CNKI data, matching articles by the pair
    (normalized title, normalized journal/source name).

    Args:
        crawl_data: dict with an "articles" list; each article is expected
            to carry at least "title" and "source" keys.
        cnki_data: mapping of article title -> dict with "article_info"
            and "references" entries.

    Returns:
        Mapping of original CNKI title -> merged article record. CNKI
        entries with no matching crawl article are dropped.
    """
    merged_data: Dict[str, Any] = {}

    # Index crawl articles by (title, source) for O(1) matching.
    crawl_articles = {
        (normalize_title(article["title"]), normalize_title(article["source"])): article
        for article in crawl_data.get("articles", [])
    }

    for title, article_data in cnki_data.items():
        normalized_title = normalize_title(title)
        # Hoist the nested info dict once instead of repeating the lookup
        # for every field below.
        info = article_data.get("article_info", {})
        journal_name = info.get("journal_name", "")
        normalized_journal_name = normalize_title(journal_name)
        crawl_article = crawl_articles.get((normalized_title, normalized_journal_name))

        # Debug output for diagnosing match failures.
        print(f"Normalized CNKI Title: {normalized_title}")
        print(f"Journal Name: {journal_name}")
        print(f"Normalized Journal Name: {normalized_journal_name}")
        print(f"Crawl Article Found: {crawl_article is not None}")

        if crawl_article is None:
            continue

        print(f"Crawl Article Title: {crawl_article.get('title')}")
        print(f"Crawl Article Source: {crawl_article.get('source')}")

        # "".split(";") yields [""], so drop empty author entries produced
        # by a missing/empty "authors" field or trailing separators.
        authors = [a for a in crawl_article.get("authors", "").split(";") if a]

        merged_data[title] = {
            "journal_name": journal_name,  # journal name
            "authors": authors,  # author list
            "affiliations": info.get("affiliations", []),  # author affiliations
            "abstract": info.get("abstract", ""),  # abstract
            "funding": info.get("funding", ""),  # funding info
            "doi": info.get("doi", ""),  # DOI
            "keywords": info.get("keywords", ""),  # keywords
            "album": info.get("album", ""),  # album/collection
            "publication_date": crawl_article.get("date", ""),  # publication date
            "quote": crawl_article.get("quote", ""),  # citation count info
            "download": crawl_article.get("download", ""),  # download count info
            "references": article_data.get("references", []),  # references
        }

    return merged_data


def save_merged_data(data: Dict[str, Any], output_file: str):
    """
    Write the merged dataset to *output_file* as pretty-printed UTF-8 JSON.

    Failures are reported to stdout rather than raised, keeping the batch
    pipeline running best-effort.
    """
    try:
        with open(output_file, "w", encoding="utf-8") as out:
            json.dump(data, out, ensure_ascii=False, indent=2)
        print(f"Merged data saved to {output_file}")
    except Exception as err:
        print(f"Error saving merged data: {str(err)}")


def main():
    def collect_pairs():
        """Collect (crawl_file, cnki_file, output_file) triples to merge."""
        # Make sure every base directory exists before walking/writing.
        for base in (OUTPUT_BASE, CNKI_BASE, CRAWL_BASE):
            os.makedirs(base, exist_ok=True)

        triples = []
        # Walk all subdirectories under the crawl base recursively.
        for root, _, filenames in os.walk(CRAWL_BASE):
            for name in filenames:
                if not name.endswith("_crawl.json"):
                    continue
                query = name.replace("_crawl.json", "")
                crawl_path = os.path.join(root, name)
                cnki_path = os.path.join(CNKI_BASE, f"CNKI_LY={query}.json")
                out_path = os.path.join(OUTPUT_BASE, f"{query}_merged.json")
                # Ensure the output file's directory exists.
                os.makedirs(os.path.dirname(out_path), exist_ok=True)
                triples.append((crawl_path, cnki_path, out_path))
        return triples

    # Process every pair in one batch: load, merge, save, report.
    for crawl_file, cnki_file, output_file in collect_pairs():
        print(f"\n处理文件对:\n{crawl_file}\n{cnki_file}\n-> {output_file}")

        crawl_data = load_json_file(crawl_file)
        cnki_data = load_json_file(cnki_file)

        merged_data = merge_json_data(crawl_data, cnki_data)
        save_merged_data(merged_data, output_file)

        print("\nStatistics:")
        print(f"Crawl data articles: {len(crawl_data.get('articles', []))}")
        print(f"CNKI data articles: {len(cnki_data)}")
        print(f"Merged articles: {len(merged_data)}")


def merge_all_results(output_base: str) -> "tuple[Dict[str, Any], int]":
    """
    Merge every ``*_merged.json`` file found under *output_base*.

    The original annotation claimed a plain dict return, but the function
    returns a (data, file_count) tuple — the annotation is now fixed (as a
    string literal so it is valid on any Python 3 version).

    Returns:
        A tuple of (combined mapping of title -> article record,
        number of files merged). Later files overwrite earlier ones on
        duplicate titles.
    """
    all_data: Dict[str, Any] = {}
    total_files = 0

    for root, _, files in os.walk(output_base):
        for file in files:
            if file.endswith("_merged.json"):
                file_path = os.path.join(root, file)
                data = load_json_file(file_path)
                all_data.update(data)
                total_files += 1

    return all_data, total_files


def generate_report(all_data: Dict[str, Any], total_files: int, output_path: str):
    """
    Build a summary report of the merged dataset, write it to *output_path*
    as UTF-8 JSON, and return the report dict.

    The report contains overall counts, per-journal counts (descending),
    and per-year counts (ascending by year).
    """

    def count_nonempty(field: str) -> int:
        """Number of articles whose *field* is present and truthy."""
        return sum(1 for item in all_data.values() if item.get(field))

    # Tally journal and year distributions in a single pass.
    journal_counts: Dict[str, int] = {}
    year_counts: Dict[str, int] = {}
    for record in all_data.values():
        journal = record.get("journal_name", "未知期刊")
        journal_counts[journal] = journal_counts.get(journal, 0) + 1

        # First four characters of the publication date are the year.
        year = record.get("publication_date", "")[:4]
        if year.isdigit():
            year_counts[year] = year_counts.get(year, 0) + 1

    report = {
        "总体统计": {
            "合并文件总数": total_files,
            "文献总数": len(all_data),
            "包含DOI文献数": count_nonempty("doi"),
            "包含基金文献数": count_nonempty("funding"),
            "包含参考文献文献数": count_nonempty("references"),
        },
        "期刊统计": dict(
            sorted(journal_counts.items(), key=lambda kv: kv[1], reverse=True)
        ),
        "年份统计": dict(sorted(year_counts.items())),
    }

    with open(output_path, "w", encoding="utf-8") as f:
        json.dump(report, f, ensure_ascii=False, indent=2)

    return report


if __name__ == "__main__":
    # Run the per-pair merge pipeline first.
    main()

    # After all file pairs are processed, combine every *_merged.json
    # under OUTPUT_BASE into one dataset.
    print("\n开始合并所有结果...")
    all_data, total_files = merge_all_results(OUTPUT_BASE)

    # Persist the complete combined dataset.
    complete_dataset_path = os.path.join(OUTPUT_BASE, "complete_dataset.json")
    save_merged_data(all_data, complete_dataset_path)

    # Generate the summary report (overall / journal / year statistics).
    report_path = os.path.join(OUTPUT_BASE, "dataset_report.json")
    report = generate_report(all_data, total_files, report_path)

    # Print a short report summary to stdout.
    print("\n数据集报告摘要:")
    print(f"总文献数: {report['总体统计']['文献总数']}")
    print(f"合并文件数: {report['总体统计']['合并文件总数']}")
    print(f"包含DOI文献数: {report['总体统计']['包含DOI文献数']}")
    print(f"收录期刊数: {len(report['期刊统计'])}")
    print(
        f"年份跨度: {min(report['年份统计'].keys(), default='N/A')} - {max(report['年份统计'].keys(), default='N/A')}"
    )