'''
author:        Wang Chenyang <cy-wang21@mails.tsinghua.edu.cn>
date:          2024-09-23
Copyright © Department of Physics, Tsinghua University. All rights reserved
'''

from xml.etree import ElementTree as ET
import requests
from datetime import datetime
from typing import Literal
import os

DATA_FOLDER = "data/arXiv"  # local folder where fetched RSS xml snapshots are stored
RSS_URL_PREFIX = 'http://arxiv.org/rss/'  # feed URL base; the rss source name is appended
SUPPORTED_RSS = Literal["physics", "quant-ph"]  # static type for rss_source parameters
RSS_SOURCE_LIST = ["physics", "quant-ph"]  # runtime list of the supported rss feeds


def get_pages_from_url(url: str) -> str:
    '''Extract the "pages" field (the arXiv id, e.g. "2401.00001v1")
    from an arXiv abstract URL, i.e. the last path segment.
    '''
    last_segment = url.rsplit('/', 1)[-1]
    return last_segment.strip()


def get_full_info(item: dict) -> None:
    '''Fill in the derived fields of a literature dict, in place.

    Expects the dict to already hold the text fields (at least 'url');
    adds 'type', 'publication', 'pages' and 'arxiv-id', then removes
    every newline character from all values.
    '''
    item['type'] = 'preprint'
    item['publication'] = 'arXiv'
    pages = get_pages_from_url(item['url'])
    item['pages'] = pages
    # drop the version suffix: "2401.00001v2" -> "2401.00001"
    item['arxiv-id'] = pages.split('v')[0].strip()

    # flatten multi-line values (arXiv wraps titles/abstracts with '\n')
    for key in list(item):
        item[key] = item[key].replace('\n', '')


def parse_arXiv_query(response_content: bytes) -> list[dict]:
    '''Parse the Atom feed returned by the arXiv API into literature dicts.'''
    ATOM = {"atom": 'http://www.w3.org/2005/Atom'}
    feed = ET.fromstring(response_content)

    results = []
    for entry in feed.findall("atom:entry", namespaces=ATOM):

        def text_of(tag: str) -> str:
            # text content of a direct Atom child of the current entry
            return entry.find(f"atom:{tag}", namespaces=ATOM).text

        names = [
            author.find('atom:name', namespaces=ATOM).text.strip()
            for author in entry.findall("atom:author", namespaces=ATOM)
        ]
        record = {
            'url': text_of('id').strip(),
            # keep only the date part of e.g. "2024-01-05T12:00:00Z"
            'date': text_of('updated').split('T')[0].strip(),
            'title': text_of('title').strip(),
            'abstract': text_of('summary').strip(),
            'author': ', '.join(names),
        }
        get_full_info(record)
        results.append(record)

    return results


def parse_rss(response_content: bytes) -> list[dict]:
    '''Parse an arXiv RSS feed (RSS 2.0 with Dublin Core extensions)
    into a list of literature dicts.
    '''
    NAMESPACES = {
        'atom': 'http://arxiv.org/schemas/atom',
        'dc': 'http://purl.org/dc/elements/1.1/',
    }
    root = ET.fromstring(response_content)

    records = []
    for node in root.findall('channel/item'):
        title = node.find('title').text
        link = node.find('link').text
        creator = node.find("dc:creator", NAMESPACES).text

        # The description is expected to look like
        #   "arXiv:<id> ...\nAbstract: <text>"
        # so the abstract is the second line minus a 10-char prefix.
        description_lines = node.find('description').text.split('\n')
        abstract = description_lines[1][10:]

        # e.g. "Mon, 23 Sep 2024 00:00:00 -0400" -> "2024-09-23"
        raw_date = node.find("pubDate", NAMESPACES).text
        published = datetime.strptime(raw_date.strip(),
                                      "%a, %d %b %Y %H:%M:%S %z")
        date_str = published.strftime("%Y-%m-%d")

        record = {
            'title': title.strip(),
            'abstract': abstract.strip(),
            'url': link.strip(),
            'author': creator.strip(),
            'date': date_str.strip(),
        }
        get_full_info(record)
        records.append(record)

    return records


def get_missing_info_from_api(literature_list: list[dict],
                              timeout: float = 30.0) -> list[dict]:
    ''' Get info of the literature list by arXiv id.

    The arXiv ids are extracted from each item's 'url' field and queried
    from the arXiv export API in a single batched request.

        literature_list: dicts that contain at least a 'url' key
        timeout: seconds to wait for the HTTP response; without it the
                 request could hang indefinitely on a stalled server

    Returns the literature dicts parsed from the API response.
    '''
    arxiv_id_list = [
        get_pages_from_url(item['url']) for item in literature_list
    ]
    url = f"https://export.arxiv.org/api/query?id_list={','.join(arxiv_id_list)}"
    response = requests.get(url, timeout=timeout)
    return parse_arXiv_query(response.content)


def get_latest_added(old_list: list[dict],
                     new_list: list[dict]) -> list[dict]:
    '''Return the entries of new_list whose arXiv id does not already
    appear in old_list, i.e. the newly added records.
    '''
    known_ids = {record['arxiv-id'] for record in old_list}
    return [record for record in new_list
            if record['arxiv-id'] not in known_ids]


def batch_update_rss(
    rss_source: SUPPORTED_RSS,
    new_data: Literal["rss"] | str | list[dict] = "rss",
    old_data: Literal["default", "none"] | str | list[dict] = "default"
) -> list[dict[str, str]] | None:
    ''' batch update arXiv literatures from rss files
            rss_source: which rss to get
            new_data: new data to add. The supported input types are:
                "rss": get directly from rss
                any string: get from a xml file named by 'new_data'
                list: list of literature dict
            old_data: old data to compare. The supported input types are:
                "default": get from default xml file, i.e. {DATA_FOLDER}/source/{rss_source}.xml
                "none": no old data
                any string: file name
                list: list + default xml file if exists

        Returns the records in the new data that are not in the old data,
        or None if the rss feed could not be downloaded.
    '''
    default_xml_file = f"{DATA_FOLDER}/source/{rss_source}.xml"

    # 1. Resolve the old data into a list of literature dicts
    if isinstance(old_data, str):
        if old_data == "default":
            old_items = (_parse_rss_file(default_xml_file)
                         if os.path.exists(default_xml_file) else [])
        elif old_data == "none":
            old_items = []
        else:
            # explicit file name
            old_items = _parse_rss_file(old_data)
    else:
        # caller passed a list: merge with the default file if it exists.
        # Copy first so the caller's list is not mutated in place.
        old_items = list(old_data)
        if os.path.exists(default_xml_file):
            old_items += _parse_rss_file(default_xml_file)

    # 2. Resolve the new data and snapshot it to the default xml file
    if isinstance(new_data, str):
        if new_data == "rss":
            # timeout so a stalled arXiv server cannot hang the update
            response = requests.get(RSS_URL_PREFIX + rss_source, timeout=30)
            if response.status_code != 200:
                print('Failed to retrieve RSS feed: '
                      f'Status code {response.status_code}')
                return None
            new_items = parse_rss(response.content)
            with open(default_xml_file, "wb") as fp:
                fp.write(response.content)
        else:
            # file name: read it and keep the default snapshot in sync
            with open(new_data, "rb") as fp:
                source_str = fp.read()
            if new_data != default_xml_file:
                with open(default_xml_file, "wb") as fp:
                    fp.write(source_str)
            new_items = parse_rss(source_str)
    else:
        # already a list of literature dicts
        new_items = new_data

    # 3. Report only the records that were not already known
    return get_latest_added(old_items, new_items)


def _parse_rss_file(file_name: str) -> list[dict]:
    '''Read an RSS xml snapshot from disk and parse it.'''
    with open(file_name, "rb") as fp:
        return parse_rss(fp.read())
