import json
import pandas as pd
import numpy as np
from datetime import datetime
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import string

# Download the NLTK resources required by the preprocessing steps below.
nltk.download('punkt_tab')  # tokenizer models used by word_tokenize
nltk.download('stopwords')  # stop-word lists (English list is used later)

# Maps arXiv category codes (as found in the metadata "categories" field)
# to human-readable names. Legacy/deprecated codes that have no modern
# display name are mapped to the sentinel 'Not available' and are dropped
# by get_cat_text().
category_map = {
'acc-phys': 'Accelerator Physics',
'adap-org': 'Not available',
'q-bio': 'Not available',
'cond-mat': 'Not available',
'chao-dyn': 'Not available',
'patt-sol': 'Not available',
'dg-ga': 'Not available',
'solv-int': 'Not available',
'bayes-an': 'Not available',
'comp-gas': 'Not available',
'alg-geom': 'Not available',
'funct-an': 'Not available',
'q-alg': 'Not available',
'ao-sci': 'Not available',
'atom-ph': 'Atomic Physics',
'chem-ph': 'Chemical Physics',
'plasm-ph': 'Plasma Physics',
'mtrl-th': 'Not available',
'cmp-lg': 'Not available',
'supr-con': 'Not available',

'econ.GN': 'General Economics',
'econ.TH': 'Theoretical Economics',
'eess.SY': 'Systems and Control',

'astro-ph': 'Astrophysics',
'astro-ph.CO': 'Cosmology and Nongalactic Astrophysics',
'astro-ph.EP': 'Earth and Planetary Astrophysics',
'astro-ph.GA': 'Astrophysics of Galaxies',
'astro-ph.HE': 'High Energy Astrophysical Phenomena',
'astro-ph.IM': 'Instrumentation and Methods for Astrophysics',
'astro-ph.SR': 'Solar and Stellar Astrophysics',
'cond-mat.dis-nn': 'Disordered Systems and Neural Networks',
'cond-mat.mes-hall': 'Mesoscale and Nanoscale Physics',
'cond-mat.mtrl-sci': 'Materials Science',
'cond-mat.other': 'Other Condensed Matter',
'cond-mat.quant-gas': 'Quantum Gases',
'cond-mat.soft': 'Soft Condensed Matter',
'cond-mat.stat-mech': 'Statistical Mechanics',
'cond-mat.str-el': 'Strongly Correlated Electrons',
'cond-mat.supr-con': 'Superconductivity',
'cs.AI': 'Artificial Intelligence',
'cs.AR': 'Hardware Architecture',
'cs.CC': 'Computational Complexity',
'cs.CE': 'Computational Engineering, Finance, and Science',
'cs.CG': 'Computational Geometry',
'cs.CL': 'Computation and Language',
'cs.CR': 'Cryptography and Security',
'cs.CV': 'Computer Vision and Pattern Recognition',
'cs.CY': 'Computers and Society',
'cs.DB': 'Databases',
'cs.DC': 'Distributed, Parallel, and Cluster Computing',
'cs.DL': 'Digital Libraries',
'cs.DM': 'Discrete Mathematics',
'cs.DS': 'Data Structures and Algorithms',
'cs.ET': 'Emerging Technologies',
'cs.FL': 'Formal Languages and Automata Theory',
'cs.GL': 'General Literature',
'cs.GR': 'Graphics',
'cs.GT': 'Computer Science and Game Theory',
'cs.HC': 'Human-Computer Interaction',
'cs.IR': 'Information Retrieval',
'cs.IT': 'Information Theory',
'cs.LG': 'Machine Learning',
'cs.LO': 'Logic in Computer Science',
'cs.MA': 'Multiagent Systems',
'cs.MM': 'Multimedia',
'cs.MS': 'Mathematical Software',
'cs.NA': 'Numerical Analysis',
'cs.NE': 'Neural and Evolutionary Computing',
'cs.NI': 'Networking and Internet Architecture',
'cs.OH': 'Other Computer Science',
'cs.OS': 'Operating Systems',
'cs.PF': 'Performance',
'cs.PL': 'Programming Languages',
'cs.RO': 'Robotics',
'cs.SC': 'Symbolic Computation',
'cs.SD': 'Sound',
'cs.SE': 'Software Engineering',
'cs.SI': 'Social and Information Networks',
'cs.SY': 'Systems and Control',
'econ.EM': 'Econometrics',
'eess.AS': 'Audio and Speech Processing',
'eess.IV': 'Image and Video Processing',
'eess.SP': 'Signal Processing',
'gr-qc': 'General Relativity and Quantum Cosmology',
'hep-ex': 'High Energy Physics - Experiment',
'hep-lat': 'High Energy Physics - Lattice',
'hep-ph': 'High Energy Physics - Phenomenology',
'hep-th': 'High Energy Physics - Theory',
'math.AC': 'Commutative Algebra',
'math.AG': 'Algebraic Geometry',
'math.AP': 'Analysis of PDEs',
'math.AT': 'Algebraic Topology',
'math.CA': 'Classical Analysis and ODEs',
'math.CO': 'Combinatorics',
'math.CT': 'Category Theory',
'math.CV': 'Complex Variables',
'math.DG': 'Differential Geometry',
'math.DS': 'Dynamical Systems',
'math.FA': 'Functional Analysis',
'math.GM': 'General Mathematics',
'math.GN': 'General Topology',
'math.GR': 'Group Theory',
'math.GT': 'Geometric Topology',
'math.HO': 'History and Overview',
'math.IT': 'Information Theory',
'math.KT': 'K-Theory and Homology',
'math.LO': 'Logic',
'math.MG': 'Metric Geometry',
'math.MP': 'Mathematical Physics',
'math.NA': 'Numerical Analysis',
'math.NT': 'Number Theory',
'math.OA': 'Operator Algebras',
'math.OC': 'Optimization and Control',
'math.PR': 'Probability',
'math.QA': 'Quantum Algebra',
'math.RA': 'Rings and Algebras',
'math.RT': 'Representation Theory',
'math.SG': 'Symplectic Geometry',
'math.SP': 'Spectral Theory',
'math.ST': 'Statistics Theory',
'math-ph': 'Mathematical Physics',
'nlin.AO': 'Adaptation and Self-Organizing Systems',
'nlin.CD': 'Chaotic Dynamics',
'nlin.CG': 'Cellular Automata and Lattice Gases',
'nlin.PS': 'Pattern Formation and Solitons',
'nlin.SI': 'Exactly Solvable and Integrable Systems',
'nucl-ex': 'Nuclear Experiment',
'nucl-th': 'Nuclear Theory',
'physics.acc-ph': 'Accelerator Physics',
'physics.ao-ph': 'Atmospheric and Oceanic Physics',
'physics.app-ph': 'Applied Physics',
'physics.atm-clus': 'Atomic and Molecular Clusters',
'physics.atom-ph': 'Atomic Physics',
'physics.bio-ph': 'Biological Physics',
'physics.chem-ph': 'Chemical Physics',
'physics.class-ph': 'Classical Physics',
'physics.comp-ph': 'Computational Physics',
'physics.data-an': 'Data Analysis, Statistics and Probability',
'physics.ed-ph': 'Physics Education',
'physics.flu-dyn': 'Fluid Dynamics',
'physics.gen-ph': 'General Physics',
'physics.geo-ph': 'Geophysics',
'physics.hist-ph': 'History and Philosophy of Physics',
'physics.ins-det': 'Instrumentation and Detectors',
'physics.med-ph': 'Medical Physics',
'physics.optics': 'Optics',
'physics.plasm-ph': 'Plasma Physics',
'physics.pop-ph': 'Popular Physics',
'physics.soc-ph': 'Physics and Society',
'physics.space-ph': 'Space Physics',
'q-bio.BM': 'Biomolecules',
'q-bio.CB': 'Cell Behavior',
'q-bio.GN': 'Genomics',
'q-bio.MN': 'Molecular Networks',
'q-bio.NC': 'Neurons and Cognition',
'q-bio.OT': 'Other Quantitative Biology',
'q-bio.PE': 'Populations and Evolution',
'q-bio.QM': 'Quantitative Methods',
'q-bio.SC': 'Subcellular Processes',
'q-bio.TO': 'Tissues and Organs',
'q-fin.CP': 'Computational Finance',
'q-fin.EC': 'Economics',
'q-fin.GN': 'General Finance',
'q-fin.MF': 'Mathematical Finance',
'q-fin.PM': 'Portfolio Management',
'q-fin.PR': 'Pricing of Securities',
'q-fin.RM': 'Risk Management',
'q-fin.ST': 'Statistical Finance',
'q-fin.TR': 'Trading and Market Microstructure',
'quant-ph': 'Quantum Physics',
'stat.AP': 'Applications',
'stat.CO': 'Computation',
'stat.ME': 'Methodology',
'stat.ML': 'Machine Learning',
'stat.OT': 'Other Statistics',
'stat.TH': 'Statistics Theory'
}

# Load the dataset and keep only papers updated within a year range.
def load_filtered_data(filepath, start_year, end_year):
    """
    Load papers from a line-delimited JSON file, keeping only those whose
    ``update_date`` year falls inside [start_year, end_year].

    :param filepath: str, path to a JSONL file (one JSON object per line)
    :param start_year: int, first year to keep (inclusive)
    :param end_year: int, last year to keep (inclusive)
    :return: list of dicts with keys id, title, abstract, categories
    """
    filtered_data = []
    # Explicit UTF-8: arXiv metadata contains non-ASCII names/abstracts and
    # the default locale encoding is platform-dependent.
    with open(filepath, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue  # skip blank lines instead of crashing json.loads
            paper = json.loads(line)  # parse one JSON record
            update_date = paper.get("update_date")
            if update_date:  # only records that carry an update date
                paper_date = datetime.strptime(update_date, "%Y-%m-%d")
                if start_year <= paper_date.year <= end_year:
                    # Keep only the fields needed downstream.
                    filtered_data.append({
                        "id": paper.get("id"),
                        "title": paper.get("title"),
                        "abstract": paper.get("abstract"),
                        "categories": paper.get("categories"),
                    })
    return filtered_data


# Convert paper categories from abbreviated codes to readable names.
def get_cat_text(x):
    """
    Convert a space-separated string of arXiv category codes into a
    comma-separated string of human-readable names.

    Codes mapped to 'Not available' are dropped. Bug fix: the original
    built the string with an ``i == 0`` check, so when the *first* code was
    skipped the result kept a leading ', ' (``str.strip`` removes only
    whitespace, not the comma). Joining the surviving names avoids that.

    :param x: str, space-separated category abbreviations
    :return: str, readable names joined by ', ' (empty if none available)
    :raises KeyError: if a code is missing from category_map
    """
    names = []
    for code in x.split(' '):
        cat_name = category_map[code]  # look up the readable name
        if cat_name != 'Not available':  # drop unavailable categories
            names.append(cat_name)
    return ', '.join(names)


# Remove newlines from a text field.
def clean_text(x):
    """
    Replace every newline in *x* with a space and trim surrounding
    whitespace.

    :param x: str, raw text
    :return: str, cleaned text
    """
    return x.replace("\n", " ").strip()


# Preprocess text: lowercase, strip punctuation, drop stop words.
def preprocess_text(text, stop_words=None):
    """
    Preprocess text: lowercase it, remove ASCII punctuation, tokenize,
    and drop stop words.

    Bug fix: the original read a global ``stop_words`` that is only bound
    inside the ``__main__`` section, so calling this function from any
    other entry point raised NameError. The stop-word set is now an
    optional parameter that defaults to the NLTK English list, which keeps
    existing calls (``df['abstract'].apply(preprocess_text)``) working.

    :param text: str, raw text
    :param stop_words: optional collection of words to drop; when None,
        the NLTK English stop-word list is loaded
    :return: str, preprocessed text with tokens joined by single spaces
    """
    if stop_words is None:
        stop_words = set(stopwords.words('english'))
    text = text.lower()  # normalize case
    # One-pass removal of all ASCII punctuation characters.
    text = text.translate(str.maketrans('', '', string.punctuation))
    words = word_tokenize(text)  # NLTK word tokenization
    return ' '.join(word for word in words if word not in stop_words)


if __name__ == '__main__':
    # Path to the arXiv metadata snapshot (line-delimited JSON).
    file_path = "./arxiv-metadata-oai-snapshot.json"

    # Year range used to filter papers by update_date.
    start_year = 2023
    end_year = 2024

    # Load only papers whose update year falls inside the range.
    filtered_data = load_filtered_data(file_path, start_year, end_year)

    # Convert the records into a pandas DataFrame.
    df = pd.DataFrame(filtered_data)

    print("截取23~24年数据，共{}篇论文".format(len(df)))

    # Map abbreviated category codes to human-readable names.
    df['cat_text'] = df['categories'].apply(get_cat_text)

    print("转换特征")

    # Strip newlines from the title and abstract text.
    df['title'] = df['title'].apply(clean_text)
    df['abstract'] = df['abstract'].apply(clean_text)

    print("删除换行符")

    # Load the NLTK English stop-word list.
    # NOTE: preprocess_text reads this module-level name, so this binding
    # must happen before the .apply() call below.
    stop_words = set(stopwords.words('english'))

    # Preprocess abstracts (lowercase, drop punctuation and stop words).
    df['processed_abstract'] = df['abstract'].apply(preprocess_text)

    print("删除停用词")

    # Concatenate title and processed abstract into one text field.
    # NOTE(review): the literal string ' {title} ' is inserted between the
    # two columns — presumably a deliberate separator token, but it could
    # also be a broken f-string; confirm the intent.
    df['text'] = df['title'] + ' {title} ' + df['processed_abstract']

    print("合并标题和摘要")

    # Persist the processed records as line-delimited JSON (keep non-ASCII).
    df.to_json("processed_data.json", orient="records", lines=True, force_ascii=False)

    print("数据保存成功")

    # Extract the text, arXiv id, and category columns as plain lists.
    chunk_list = list(df['text'])
    arxiv_id_list = list(df['id'])
    cat_list = list(df['cat_text'])

    # Load the SentenceTransformer embedding model from a local directory.
    from sentence_transformers import SentenceTransformer
    model = SentenceTransformer("./models/all-MiniLM-L6-v2")
    print("模型加载成功")

    # Encode every text into a dense embedding vector.
    embeddings = model.encode(chunk_list)

    # Save the embedding matrix as a compressed .npz archive.
    np.savez_compressed('compressed_array.npz', array_data=embeddings)

    print("embeddings 创建完成")