# -*- coding: UTF-8 -*-
'''
@Project ：academic_trend_analysis
@File    ：hot_term_analyzer.py
@IDE     ：PyCharm
@Author  ：iyoahs
@Date    ：2025/6/20 10:45
@Describe：热点关键词分析
'''
import re
import pandas as pd
from pyspark.sql.functions import lower, col
from rapidfuzz import fuzz, utils
from pyspark.sql import SparkSession
from pyspark.sql.types import BooleanType
from pyspark import RDD
from time import time
from collections import defaultdict
from typing import List, Dict, Tuple, Any
import warnings
from tqdm import tqdm
import findspark
from pyspark import SparkContext

# Locate the local Spark installation and put pyspark on sys.path.
findspark.init()

# Ignore warnings (noisy UserWarnings from pandas/pyspark interop)
warnings.filterwarnings("ignore", category=UserWarning)


def normalize_text(text: str) -> str:
    """Normalize *text* for matching.

    Runs RapidFuzz's default processor (lowercase, strip punctuation),
    then collapses runs of whitespace to single spaces.  Falsy input
    yields the empty string.
    """
    if not text:
        return ""
    processed = utils.default_process(text)
    collapsed = re.sub(r'\s+', ' ', processed)
    return collapsed.strip()


def generate_variants(keyword: str, special_char_map: dict) -> List[str]:
    """Generate all variant spellings of *keyword*.

    Variants include: the normalized keyword itself, every single-character
    substitution from *special_char_map* (e.g. '-' -> ' ', '_', ''), and an
    initialism built from the separator-delimited parts
    (e.g. "meta-rl" -> "mr").

    :param keyword: raw keyword to expand
    :param special_char_map: maps a special character to its replacement list
    :return: de-duplicated list of variant strings

    Fix: the original normalized the keyword twice (once for the seed set,
    once for ``base``); normalize exactly once.
    """
    base = normalize_text(keyword)
    variants = {base}
    # Substitute each mapped special character with every replacement.
    for char, replacements in special_char_map.items():
        if char in base:
            for replacement in replacements:
                variant = base.replace(char, replacement)
                if variant:  # avoid generating an empty string
                    variants.add(variant)
    # Build an initialism when the keyword contains separators.
    if any(c in base for c in ('-', '_', ' ')):
        abbrev = ''.join(w[0] for w in re.split(r'[-_\s]', base) if w)
        variants.add(abbrev)
    return list(variants)


def rapidfuzz_match(text: str, keyword: str, threshold: int, special_char_map: dict) -> bool:
    """
    Match *keyword* (or one of its variants) against *text* using RapidFuzz.

    Two passes: an exact word-boundary search over every variant, then a
    high-precision fuzzy comparison of longer variants against individual
    words, gated by a length-ratio check to avoid spurious partial matches.

    :param text: text to search in
    :param keyword: original keyword
    :param threshold: match threshold (0-100)
    :param special_char_map: special-character substitution table
    :return: True when the keyword is considered present
    """
    if not text or not keyword:
        return False

    norm_text = normalize_text(text)
    variants = generate_variants(keyword, special_char_map)

    # Pass 1: exact occurrence bounded by non-word characters or string ends.
    for variant in variants:
        if len(variant) < 2:  # too-short variants are skipped entirely
            continue
        if re.search(rf'(^|\W){re.escape(variant)}($|\W)', norm_text):
            return True

    # Pass 2: fuzzy match per word; only variants of length >= 3 qualify.
    words = re.findall(r'\b\w+\b', norm_text)
    for variant in variants:
        if len(variant) < 3:  # short variants: exact matching only
            continue
        for word in words:
            score = fuzz.partial_ratio(variant, word, score_cutoff=threshold)
            # Length ratio keeps a partial hit from matching a much longer word.
            if score >= threshold and 0.7 <= len(variant) / len(word) <= 1.4:
                return True
    return False


class AcademicKeywordSearch:
    """Academic keyword search/analysis tool built on Spark, MongoDB and RapidFuzz."""

    def __init__(self, config=None):
        """Initialize the configuration and start the Spark session.

        :param config: optional dict that fully replaces the default config
            (keys: spark_config, special_char_map, default_keyword_file,
            default_output_file, match_threshold)
        """
        self.config = config or {
            "spark_config": {
                "spark.app.name": "AcademicKeywordSearch_Optimized",
                "spark.mongodb.input.uri": "mongodb://localhost:27017/arxiv_db.papers",
                "spark.jars.packages": "org.mongodb.spark:mongo-spark-connector_2.12:3.0.1",
                "spark.executor.memory": "4g",
                "spark.driver.memory": "4g",
                "spark.sql.shuffle.partitions": "8",
                "spark.local.dir": "../temp",
                "spark.cleaner.referenceTracking.cleanCheckpoints": "true"
            },
            "special_char_map": {
                '-': [' ', '_', ''],
                '_': [' ', '-', ''],
                ' ': ['-', '_', '']
            },
            "default_keyword_file": "../../data/ai_research_keywords.csv",
            "default_output_file": "keyword_results_optimized.csv",
            "match_threshold": 90
        }

        # Spark initialization
        self.spark = self._init_spark_session()
        self.sc = self.spark.sparkContext
        self.sc.setLogLevel("ERROR")

    def _init_spark_session(self) -> "SparkSession":
        """Build a SparkSession from self.config["spark_config"].

        FIX: the original special-cased a literal "app_name" key that never
        exists (the config uses "spark.app.name", which .config() handles
        natively) — the dead branch is removed.
        """
        builder = SparkSession.builder
        for key, value in self.config["spark_config"].items():
            builder = builder.config(key, value)
        return builder.getOrCreate()

    def load_keywords(self, keyword_file: str = None) -> List[str]:
        """Load search keywords from a CSV file with a 'keywords' column.

        :param keyword_file: path override; defaults to config value
        :return: list of keywords (rows with missing values are dropped)
        """
        file_path = keyword_file or self.config["default_keyword_file"]
        df = pd.read_csv(file_path)
        # dropna(): a blank CSV row would otherwise inject a float('nan') keyword.
        return df['keywords'].dropna().tolist()

    def load_and_preprocess_data(self) -> Tuple[Any, int]:
        """Load papers from MongoDB, lowercase title/abstract, and cache.

        :return: (preprocessed DataFrame, total paper count)
        """
        df = self.spark.read.format("mongo").load()
        # Pre-normalize once on the cluster so per-keyword matching is cheaper.
        df = df.withColumn("norm_abstract", lower(col("abstract"))) \
               .withColumn("norm_title", lower(col("title"))) \
               .cache()
        total_papers = df.count()
        print(f"已加载 {total_papers} 篇论文，并完成预处理")
        return df, total_papers

    def process_keywords_optimized(self, df: Any, keywords: List[str], total_papers: int) -> List[Dict]:
        """
        Search every keyword over every paper; each paper counts at most once
        per keyword.

        :param df: preprocessed DataFrame (needs id, norm_abstract, norm_title)
        :param keywords: keywords to search for
        :param total_papers: total paper count (drives the progress bar)
        :return: one dict per keyword: keyword / paper_count / paper_ids
        """
        threshold = self.config["match_threshold"]
        special_char_map = self.config["special_char_map"]

        def process_partition(iterator, local_keywords: List[str], threshold: int, special_char_map: dict):
            """Match one partition; yield (papers_seen, {keyword: {paper_id}})."""
            local_matches = defaultdict(set)
            papers_seen = 0
            for paper in iterator:
                papers_seen += 1
                abstract = paper.norm_abstract or ""
                title = paper.norm_title or ""
                for keyword in local_keywords:
                    if rapidfuzz_match(abstract, keyword, threshold, special_char_map) or \
                       rapidfuzz_match(title, keyword, threshold, special_char_map):
                        local_matches[keyword].add(paper.id)
            yield papers_seen, dict(local_matches)

        # Broadcast keywords once instead of shipping them with every task;
        # use the session's own SparkContext rather than getOrCreate().
        keywords_bc = self.sc.broadcast(keywords)

        # FIX: the original inserted .barrier() here.  Barrier execution
        # requires all tasks to be scheduled simultaneously and adds nothing
        # to a plain mapPartitions — it could hang on small clusters.
        rdd = df.rdd.repartition(8).mapPartitions(
            lambda it: process_partition(it, keywords_bc.value, threshold, special_char_map)
        )

        keyword_matches = defaultdict(set)  # keyword -> set of paper ids (dedup)
        with tqdm(total=total_papers, desc="处理论文进度") as pbar:
            for papers_seen, partition_result in rdd.collect():
                for kw, paper_ids in partition_result.items():
                    keyword_matches[kw].update(paper_ids)
                # FIX: advance by papers processed, not by match count —
                # the old bar over/under-reported progress.
                pbar.update(papers_seen)
        keywords_bc.unpersist()

        results = []
        for keyword in keywords:
            paper_ids = keyword_matches.get(keyword, set())
            results.append({
                "keyword": keyword,
                "paper_count": len(paper_ids),
                # sorted(): sets are unordered, so sort for deterministic output.
                "paper_ids": "|".join(sorted(paper_ids))
            })
        return results

    def save_results(self, results: List[Dict], filename: str = None):
        """Write the result dicts to CSV.

        :param results: list of per-keyword result dicts
        :param filename: path override; defaults to config value
        """
        file_path = filename or self.config["default_output_file"]
        pd.DataFrame(results).to_csv(file_path, index=False)
        print(f"结果已保存到 {file_path}")

    def run_analysis(self, keyword_file: str = None, output_file: str = None) -> List[Dict]:
        """Run the full pipeline: load keywords, load papers, match, save, report.

        :param keyword_file: optional keyword CSV path
        :param output_file: optional result CSV path
        :return: the per-keyword result list
        :raises Exception: re-raises anything that fails after logging it
        """
        try:
            start_time = time()
            keywords = self.load_keywords(keyword_file)
            print(f"已加载 {len(keywords)} 个搜索关键词")
            df, total_papers = self.load_and_preprocess_data()
            print("开始关键词搜索处理...")
            results = self.process_keywords_optimized(df, keywords, total_papers)
            self.save_results(results, output_file)
            print("\n=== 关键词匹配统计 ===")
            top_keywords = sorted(results, key=lambda x: x["paper_count"], reverse=True)[:10]
            for res in top_keywords:
                print(f"{res['keyword']: <25}: {res['paper_count']: >5}篇")
            end_time = time()
            print(f"\n总处理时间: {end_time - start_time:.2f}秒")
            return results
        except Exception as e:
            print(f"处理过程中发生错误: {str(e)}")
            raise

    def stop(self):
        """Release the Spark session (best-effort; replaces the old commented-out __del__)."""
        spark = getattr(self, "spark", None)
        if spark is not None:
            try:
                spark.stop()
            except Exception:
                pass  # shutdown is best-effort
            self.spark = None


if __name__ == "__main__":
    aks = AcademicKeywordSearch()
    aks.run_analysis()