# Explore the arXiv metadata dataset

import json
from tqdm import tqdm
import matplotlib.pyplot as plt  # 导入 matplotlib
from base import Agent
import re
import sqlite3

# System prompt for the compression agent: asks the model to shrink an
# abstract to 2000 characters or fewer while keeping its key points
# (content, methods, results, significance) and to return only the
# compressed text with no explanation.
prompt = '''You are my research assistant, you are responsible for helping me compress the abstract, 
            compress it to 2000 characters or less, extract the key points of the abstract 
            (research content, research methods, research results, research significance).
            Note: only return the compressed abstract, do not output explanation.'''

# Shared LLM agent instance used by preprocess_json_data() to compress abstracts.
agent = Agent(prompt)

def load_json_lines(file_path):
    """
    Read a file in JSON Lines format (one complete JSON document per line).

    :param file_path: path to the JSONL file
    :return: list of parsed JSON objects, in file order
    """
    json_objects = []
    with open(file_path, 'r', encoding='utf-8') as file:
        # Iterate the file handle directly instead of readlines(): this avoids
        # holding the entire raw text in memory alongside the parsed objects.
        for line in tqdm(file, desc="加载 JSON 行"):
            line = line.strip()
            if line:  # tolerate blank/trailing lines instead of crashing
                json_objects.append(json.loads(line))
    return json_objects



def preprocess_json_data(json_objects):
    """
    Preprocess article records in place: drop unused fields, shorten long
    author lists, compress over-long abstracts with the LLM agent, and
    attach a combined ``page_content`` field for downstream use.

    :param json_objects: list of article dicts (mutated in place)
    :return: the same list, for convenience
    """
    for obj in tqdm(json_objects, desc="预处理 JSON 数据"):
        # pop() with a default tolerates records missing these fields,
        # where the original `del` raised KeyError.
        obj.pop('versions', None)
        obj.pop('license', None)
        obj.pop('doi', None)

    # If the author list has more than 10 entries, keep only the first
    # author followed by "et al.".
    for obj in tqdm(json_objects, desc="预处理作者列表"):
        if len(obj['authors']) > 10:
            obj['authors'] = [obj['authors'][0] + ' et al.']

    # Abstracts longer than 2000 characters are compressed by the agent
    # and the compressed text replaces the original abstract.
    for obj in tqdm(json_objects, desc="预处理压缩摘要"):
        if len(obj['abstract']) > 2000:
            print("压缩前的摘要长度：", len(obj['abstract']))
            obj['abstract'] = agent.chat_local(obj['abstract'])
            # Strip any <think>...</think> reasoning the model emits, then
            # hard-cap the result at 2500 characters.
            obj['abstract'] = re.sub(r'<think>.*?</think>', '',
                                     obj['abstract'], flags=re.DOTALL).strip()[:2500]
            print("压缩后的摘要：")
            print(obj['abstract'])

    # Text used for embedding/search: title plus abstract.
    for obj in tqdm(json_objects, desc="添加 page_content"):
        obj['page_content'] = "Title: " + obj['title'] + "\n" + "Abstract: " + obj['abstract']

    return json_objects


def analyze_json_data(json_objects):
    """
    Analyze the records: count articles per top-level category, find the
    article with the most characters, and collect the length distributions
    of titles, abstracts and author lists.

    :param json_objects: list of article dicts
    :return: tuple of (category_count, max_length_article, title_lengths,
             abstract_lengths, author_list_lengths)
    """
    category_count = {}
    max_length_article = None
    max_length = 0
    title_lengths = []
    abstract_lengths = []
    author_list_lengths = []

    for obj in tqdm(json_objects, desc="分析 JSON 数据"):
        # Count by top-level category ("cs" from "cs.AI", etc.).
        for category in obj.get("categories", "").split():
            main_category = category.split('.')[0]
            category_count[main_category] = category_count.get(main_category, 0) + 1

        # Author list length (length of the string or list, whichever
        # 'authors' happens to be for this record).
        author_list_length = len(obj.get("authors", []))
        author_list_lengths.append(author_list_length)

        # Title and abstract character counts.
        title_length = len(obj.get("title", ""))
        abstract_length = len(obj.get("abstract", ""))
        title_lengths.append(title_length)
        abstract_lengths.append(abstract_length)

        # Track the record with the largest combined size.
        total_length = title_length + abstract_length + author_list_length
        if total_length > max_length:
            max_length = total_length
            max_length_article = obj

    # Bug fix: the original print was inside the loop and fired once per
    # record; report the final maximum once, after the scan.
    print("最长的title+摘要长度:", max_length)

    return category_count, max_length_article, title_lengths, abstract_lengths, author_list_lengths





def create_database(json_objects, db_path='docs/arxiv_articles.db'):
    """
    Create the SQLite articles table and bulk-insert article rows.

    :param json_objects: list of article dicts
    :param db_path: path of the SQLite database file; defaults to the
        original hard-coded location so existing callers are unaffected
    """
    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()
        cursor.execute('''
        CREATE TABLE IF NOT EXISTS articles (
            id TEXT PRIMARY KEY,
            authors TEXT,
            journal_ref TEXT,
            doi TEXT,
            update_date TEXT,
            title TEXT
        )
        ''')

        # Build all parameter tuples lazily and insert in one executemany
        # call (single prepared statement instead of one execute per row).
        rows = (
            (obj['id'],
             # NOTE(review): ''.join is a no-op for a plain string but would
             # concatenate a list with no separator — confirm 'authors' type.
             ''.join(obj['authors']),
             obj.get('journal-ref'),
             obj.get('doi'),
             obj.get('update_date'),
             # Collapse newlines and runs of spaces in the title.
             re.sub(" +", " ", obj['title'].replace('\n', '')))
            for obj in json_objects
        )
        cursor.executemany('''
        INSERT OR IGNORE INTO articles (id, authors, journal_ref, doi, update_date, title)
        VALUES (?, ?, ?, ?, ?, ?)
        ''', rows)
        conn.commit()
    finally:
        # Close the connection even if an insert fails (original leaked it).
        conn.close()


def query_database(title, db_path='docs/arxiv_articles.db'):
    """
    Look up an article by (partial) title in the SQLite database.

    :param title: title substring to search for (LIKE %title% match)
    :param db_path: path of the SQLite database file; defaults to the
        original hard-coded location so existing callers are unaffected
    :return: [id, authors, journal_ref, doi, update_date] for the first
        match, or None when no article matches
    """
    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()
        cursor.execute('''
        SELECT id, authors, journal_ref, doi, update_date FROM articles WHERE title LIKE ?
        ''', ('%' + title + '%',))
        # fetchone() instead of fetchall()[0]: the original raised
        # IndexError when nothing matched.
        row = cursor.fetchone()
    finally:
        # Close the connection even on query failure (original leaked it).
        conn.close()

    # Preserve the original list return type; None signals "not found".
    return list(row) if row is not None else None


def simplify_author_list(author_list):
    """
    Collapse a comma-separated author string to "First et al." when it
    lists more than ten authors; otherwise return it unchanged.

    :param author_list: comma-separated author names
    :return: simplified author string
    """
    parts = author_list.split(',')
    return parts[0] + ' et al.' if len(parts) > 10 else author_list


def extract_title(article_info):
    """
    Pull the title out of a "Title: ... Abstract: ..." string.

    :param article_info: combined title/abstract text
    :return: the extracted title, or None when the pattern is absent
    """
    # Drop newlines and squeeze runs of spaces before matching.
    flattened = re.sub(" +", " ", article_info.replace('\n', ''))
    found = re.search(r'Title:\s*(.*?)\s*Abstract:', flattened, re.DOTALL)
    return found.group(1).strip() if found else None


def _plot_histogram(values, title, xlabel, color):
    """Render one length-distribution histogram with the shared styling."""
    plt.figure(figsize=(12, 12))
    plt.hist(values, bins=30, alpha=0.7, color=color, edgecolor='black')
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel("frequency")
    plt.grid(axis='y', alpha=0.75)
    plt.show()


def main1():
    """
    Load the raw snapshot, report statistics, save the preprocessed
    records, and plot the category and length distributions.
    """
    json_objects = load_json_lines("arxiv/arxiv-metadata-oai-snapshot.json")
    category_count, max_length_article, title_lengths, abstract_lengths, author_list_lengths = analyze_json_data(json_objects)
    print("类别统计:", category_count)
    print("字符最多的文章:", max_length_article)

    # Persist the preprocessed records for later indexing.
    processed_json_objects = preprocess_json_data(json_objects)
    with open("arxiv/arxiv-metadata-oai-snapshot-processed.json", "w", encoding="utf-8") as f:
        json.dump(processed_json_objects, f, ensure_ascii=False, indent=4)

    # Category share pie chart.
    plt.figure(figsize=(12, 12))
    plt.pie(category_count.values(), labels=category_count.keys(), autopct='%1.1f%%', startangle=140)
    plt.title("categories distribution")
    plt.show()

    # The three histograms shared identical copy-pasted styling; factored
    # into one helper so the styling stays in sync.
    _plot_histogram(author_list_lengths, "author list length distribution", "author list length", 'red')
    _plot_histogram(title_lengths, "title length distribution", "title length", 'blue')
    _plot_histogram(abstract_lengths, "abstract length distribution", "abstract length", 'green')

def main2():
    """Build the SQLite article database from the raw snapshot, then run a
    sample title lookup and print the result."""
    records = load_json_lines("arxiv/arxiv-metadata-oai-snapshot.json")
    create_database(records)
    sample_title = "Deuteron-Nucleus Collisions in a Multi-Phase Transport Model"
    info = query_database(sample_title)
    print("查询结果:", info)

def main3():
    """Demo: extract a title from a page_content-style string, then look the
    article up in the database and print the row."""
    article_info = '''Title: Recent developments in the theory of electromagnetic probes in relativistic heavy-ion collisions Abstract: The theoretical developments in the study of electromagnetic radiation in relativistic heavy-ion collisions are reviewed. The recent progress in the rates for photon and lepton pair production is discussed. Together with the improvements in the hydrodynamic descriptions of the bulk medium, the combined effort is discussed to resolve the "direct photon flow puzzle" in the RHIC and the LHC experiments. Further prediction of the direct photon production in high multiplicity proton-nucleus collisions at the LHC energy can serve as a signature of the quark gluon plasma formation in these small systems. Phenomenological study of dilepton production at finite net baryon density is highlighted at the collision energies available for the RHIC beam energy scan program. )'''

    extracted = extract_title(article_info)
    print("提取的标题:", extracted)

    row = query_database(extracted)
    print("查询结果:", row)

# Entry point: currently runs the title-extraction + database-lookup demo.
if __name__ == "__main__":
    main3()