#!/usr/bin/env python
# coding: utf-8
import os
import urllib.parse

import arxiv
import requests
from lxml import html


# Module-level arXiv API client, shared by query_paper_ids().
client = arxiv.Client()

def build_query_url(term, field, classification_computer_science, classification_physics_archives, classification_include_cross_list, size, order, start = None, *, latest_year = None, date_from = None, date_to = None):
    """Build an arXiv advanced-search URL.

    Example of the target URL shape:
    https://arxiv.org/search/advanced?advanced=1&terms-0-operator=AND&terms-0-term=table&terms-0-field=title&...

    Args:
        term: search term (e.g. 'table recognition').
        field: field to match the term against (e.g. 'title').
        classification_computer_science: 'y' to include the cs archive.
        classification_physics_archives: e.g. 'all'.
        classification_include_cross_list: e.g. 'include'.
        size: page size (e.g. '50').
        order: sort order (e.g. '-announced_date_first').
        start: optional result offset for pagination.
        latest_year: if not None, restrict to the past 12 months
            (explicit from/to dates are cleared in this mode).
        date_from: optional 'submitted after' date string.
        date_to: optional 'submitted before' date string.

    Returns:
        The fully encoded query URL as a string.
    """
    base_url = 'https://arxiv.org/search/advanced'
    params = {
            'advanced': '1',
            'terms-0-operator': 'AND',
            'terms-0-term': term,
            'terms-0-field': field,
            'classification-computer_science': classification_computer_science,
            'classification-physics_archives': classification_physics_archives,
            'classification-include_cross_list': classification_include_cross_list,
            'date-year': '',
            'date-from_date': date_from,
            'date-to_date': date_to,
            'date-date_type': 'submitted_date',
            'abstracts': 'show',
            'size': size,
            'order': order
    }

    if latest_year is not None:
        # 'past_12' mode: arXiv ignores explicit dates, so blank them out.
        params['date-filter_by'] = 'past_12'
        params['date-from_date'] = ''
        params['date-to_date'] = ''
    # (The original also deleted 'date-filter_by' in the else branch, but the
    # key is never present unless set above — that branch was dead code.)

    if start is not None:
        params['start'] = start

    # Encode locally instead of issuing a throwaway HTTP GET just to read
    # response.url (the original's approach). None-valued params are dropped,
    # matching how requests omits them.
    query = urllib.parse.urlencode({k: v for k, v in params.items() if v is not None})
    return f'{base_url}?{query}'

def web_query_paper_ids(term):
    """Scrape arXiv's advanced-search results page for paper ids matching *term*.

    Args:
        term: search term matched against paper titles.

    Returns:
        List of short arXiv ids (the 'arXiv:' prefix stripped).
    """
    # Bug fix: the original passed the literal string "term" instead of the
    # function argument, so every call searched for the word 'term'.
    url = build_query_url(term, "title", "y", "all", "include", "50", '-announced_date_first')
    response = requests.get(url)

    result_list = []
    html_obj = html.fromstring(response.content)
    for item in html_obj.xpath('//li[@class="arxiv-result"]'):
        title_elements = item.xpath('div/p/a')
        if title_elements:
            paper_id = title_elements[0].text_content().strip()
            print(paper_id)
            result_list.append(paper_id.replace('arXiv:', ''))

    return result_list

def query_paper_ids(term):
    """Query arXiv through the official `arxiv` client library.

    Args:
        term: free-text search query.

    Returns:
        List of up to 50 short paper ids, newest submissions first.
    """
    search = arxiv.Search(
        query=term,
        max_results=50,
        # Bug fix: the attribute is SortCriterion — arxiv.SoftCriterion does
        # not exist and raised AttributeError on every call.
        sort_by=arxiv.SortCriterion.SubmittedDate
    )
    return [r.get_short_id() for r in client.results(search)]


def fetch_info(paper_ids, abstract_path = 'abstracts', target_path='pdfs'):
    """Fetch metadata for each paper id and write one abstract .txt file apiece.

    Args:
        paper_ids: iterable of arXiv short ids.
        abstract_path: directory for the per-paper metadata text files.
        target_path: directory created for PDFs (nothing is downloaded into it
            here — presumably reserved for a later download step; confirm).
    """
    # exist_ok=True avoids the check-then-create race of the original
    # os.path.exists() guard.
    os.makedirs(target_path, exist_ok=True)
    os.makedirs(abstract_path, exist_ok=True)

    client = arxiv.Client()
    for paper_id in paper_ids:
        paper = next(client.results(arxiv.Search(id_list=[paper_id])))
        title = paper.title.replace('/', '_')
        authors = ",".join(author.name for author in paper.authors)

        # Sanitize the remaining character that is awkward in file names.
        file_name = title.replace(':', '-')
        out_path = os.path.join(abstract_path, f'{file_name}.txt')

        # 'w' instead of 'w+': the file is only written, never read back.
        with open(out_path, 'w', encoding='utf-8') as txtfile:
            txtfile.write(f"Title: {title}\n")
            txtfile.write(f"Authors: {authors}\n")
            txtfile.write(f"Published: {paper.published.date()}\n")
            txtfile.write(f"Abstract: {paper.summary}\n")
            txtfile.write(f"PDF url: {paper.pdf_url}\n")

def main():
    """Entry point: scrape ids for a fixed query, then save their metadata."""
    fetch_info(web_query_paper_ids('table recognition'))


# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
    print('done')
