'''
author:        Wang Chenyang <cy-wang21@mails.tsinghua.edu.cn>
date:          2024-09-24
Copyright © Department of Physics, Tsinghua University. All rights reserved

Main script for AI-assisted literature classification
'''

import json
import ai_tools
import literature_format as lf
import os


# Load the LLM credentials and model name from the local config file.
# encoding is given explicitly (utf-8, like every other open() in this
# file) so the read does not depend on the platform's default locale.
# NOTE(review): assumes LLM-config.json sits in the working directory — confirm.
with open("LLM-config.json", "r", encoding="utf-8") as fp:
    LLM_json = json.load(fp)
    user_key = LLM_json['LLM-key']
    user_chat_model = LLM_json['LLM-name']

# Shared LLM client used by every classification run in this script.
ai_model = ai_tools.zhipu.ZhipuLLM(
    user_key, user_chat_model, ""
)


def write_result_list(success_list, failed_list, target_folder):
    """Append classified records to markdown files under *target_folder*.

    Args:
        success_list: iterable of tuples where item[0] is the record
            (as accepted by ``lf.markdown.save_to_markdown``) and
            item[2] is the stem of the category file it belongs to.
        failed_list: iterable of tuples where item[0] is the record and
            item[1] is the classifier's failure message.
        target_folder: directory the markdown files live in.
    """
    for item_info in success_list:
        md_str = lf.markdown.save_to_markdown([item_info[0]])
        # One file per category; append so repeated runs accumulate.
        with open(
            f"{target_folder}/{item_info[2]}.md",
            "a",
            encoding="utf-8"
        ) as fp:
            print(md_str, file=fp)

    if failed_list:
        # All failures share one file — open it once, not once per item.
        # "ZZZ." prefix sorts the failure file after the category files.
        with open(
            f"{target_folder}/ZZZ.failed.md",
            "a",
            encoding="utf-8"
        ) as fp:
            for item_info in failed_list:
                md_str = lf.markdown.save_to_markdown([item_info[0]])
                print(md_str, file=fp)
                print("message: " + item_info[1] + "\n\n", file=fp)


def batch_get_arXiv(rss_source: lf.arXiv.SUPPORTED_RSS):
    """Fetch one RSS source, classify the new entries, and save them.

    Args:
        rss_source: RSS feed identifier accepted by
            ``lf.arXiv.batch_update_rss``.
    """
    # 0. Collect every record already saved so the RSS update can skip
    #    entries we have processed before.
    all_items = []
    for cat_id, cat_name in enumerate(ai_tools.ALL_CATEGORIES):
        # Category files are named "A.First_category.md", "B....md", ...
        # in the order the categories are declared.
        fname = "data/arXiv/{}.{}.md".format(
            chr(ord('A') + cat_id),
            cat_name.replace('\'', '').replace(" ", "_")
        )
        if os.path.exists(fname):
            all_items += lf.markdown.parse_markdown_file(fname)

    # 1. Pull the feed; entries already present in old_data are dropped.
    latest_data = lf.arXiv.batch_update_rss(
        rss_source,
        old_data=all_items
    )

    # 2. Tag the new entries with the shared LLM client.
    success_list, failed_list = ai_tools.main_classify_literatures(
        ai_model, latest_data
    )

    # 3. Append the results to the per-category markdown files.
    write_result_list(success_list, failed_list, lf.arXiv.DATA_FOLDER)


def regroup_arXiv():
    """Re-classify the records of one category file and re-save them."""
    # os.path.join keeps the path portable: the original raw backslash
    # string (r"data\arXiv\...") is a single odd filename on POSIX.
    fname = os.path.join("data", "arXiv", "M.Quantum_information.md")

    print("Regrouping ...")
    all_dicts = lf.markdown.parse_markdown_file(fname)
    success_list, failed_list = ai_tools.main_classify_literatures(
        ai_model, all_dicts
    )

    # Truncate the source file; write_result_list appends, so the
    # re-classified records land in fresh (possibly different) files.
    with open(fname, "w", encoding="utf-8"):
        pass

    write_result_list(success_list, failed_list, lf.arXiv.DATA_FOLDER)


if __name__ == '__main__':
    # Process every configured RSS feed in turn; each source is fetched,
    # classified and saved independently by batch_get_arXiv.
    for rss_source in lf.arXiv.RSS_SOURCE_LIST:
        print("Current source:", rss_source)
        batch_get_arXiv(rss_source)
    # Maintenance mode: re-classify one existing file instead of fetching.
    # regroup_arXiv()
