"""
Queries arxiv API and downloads papers (the query is a parameter).
The script is intended to enrich an existing database pickle (by default db.p),
so this file will be loaded first, and then new results will be added to it.
"""

import urllib.request
from bs4 import BeautifulSoup
from datetime import datetime as dt

from database import db as mongo


def parse_arxiv_url(url):
    """
    Extract the raw arXiv id and integer version from an abstract URL.

    Example: 'http://arxiv.org/abs/1512.08756v2' -> ('1512.08756', 2)

    If the URL carries no parsable version suffix, the version defaults to 1.
    """
    id_version = url[url.rfind('/') + 1:]  # drop everything up to the last '/'
    # Split on the LAST 'v' so ids that happen to contain a 'v' stay intact
    # (the old first-'v' split truncated such ids).
    raw_id, sep, version = id_version.rpartition('v')
    if sep and version.isdigit():
        return raw_id, int(version)
    # No 'v<digits>' suffix: the whole tail is the id, version defaults to 1.
    return id_version, 1


def _parse_entry(dt_tag, dd_tag, published):
    """
    Build one article dict from a paired <dt>/<dd> listing entry.

    Args:
        dt_tag: the <dt> element carrying the abstract link (id + version).
        dd_tag: the matching <dd> element carrying title/authors/subjects.
        published: datetime stamped onto the article (midnight today).

    Returns:
        (art, displayAu, displayCa) where ``art`` holds the DB-ready fields
        (authors/categories as ';'-joined strings), ``displayAu`` is a list of
        {'name', 'url'} dicts and ``displayCa`` the human-readable subjects.
    """
    rawId, version = parse_arxiv_url(dt_tag.select('span > a:nth-child(1)')[0].attrs['href'])
    # stripped_strings yields ['Title:', '<actual title>'] — take the second.
    title = list(dd_tag.find('div', class_='list-title mathjax').stripped_strings)[1]
    authorTags = dd_tag.select('div.list-authors > a')
    authors = ''.join(f"{a.string};" for a in authorTags)
    displayAu = [{'name': a.string, 'url': f"https://export.arxiv.org/{a.attrs['href']}"}
                 for a in authorTags]
    displayCa = list(dd_tag.find('div', class_='list-subjects').stripped_strings)[1:]
    # Subjects look like "Computation and Language (cs.CL)" — keep the code in parens.
    tags = [e[e.rfind('(') + 1:e.rfind(')')] for e in displayCa]
    art = {'rawId': rawId, 'version': version, 'authors': authors,
           'title': title.replace('\n', ' '), 'category': ';'.join(tags),
           'published': published, "republish": True}
    comments = dd_tag.find('div', class_='list-comments mathjax')
    if comments is not None:
        # First stripped string is the "Comments:" label — drop it.
        art['comments'] = ''.join(list(comments.stripped_strings)[1:])
    summary = dd_tag.find('p', class_='mathjax')
    if summary is not None and summary.string is not None:
        art['summary'] = summary.string.replace('\n', ' ')
    return art, displayAu, displayCa


def crawl_arxiv_daliy():
    """
    Fetch today's new cs.CL listing from the arXiv export site and upsert
    every entry into the ``article`` Mongo collection.

    Returns:
        list[dict]: one document per paper, with ``authors`` replaced by the
        display list of {'name', 'url'} dicts and ``category`` by the
        human-readable subject strings (the DB copy keeps the joined forms).
    """
    base_url = 'https://export.arxiv.org/list/cs.CL/new'  # daily "new" listing

    with urllib.request.urlopen(base_url) as resp:
        html = resp.read().decode('utf-8')
    soup = BeautifulSoup(html, features='html.parser')

    dlpage = soup.find(id='dlpage')
    dls = dlpage.find_all('dl')

    today = dt.now().strftime("%Y-%m-%d")
    # Round-trip through a string to get a datetime at midnight of today,
    # so Mongo stores a clean date value rather than a precise timestamp.
    published = dt.strptime(today, "%Y-%m-%d")

    num = addNum = updateNum = 0
    paperDoc = []
    for dl in dls:
        # Each <dt> (links/id) is paired positionally with a <dd> (metadata).
        for dt_tag, dd_tag in zip(dl.find_all('dt'), dl.find_all('dd')):
            art, displayAu, displayCa = _parse_entry(dt_tag, dd_tag, published)
            # Insert if unseen, otherwise overwrite with the fresh metadata.
            former = mongo.article.find_one({'rawId': art['rawId']})
            if former is None:
                res = mongo.article.insert_one(art)
                art["_id"] = str(res.inserted_id)
                addNum += 1
                print(art['rawId'], 'insert')
            else:
                res = mongo.article.update_one({'rawId': art['rawId']}, {"$set": art})
                if res.matched_count:
                    art["_id"] = str(mongo.article.find_one({'rawId': art['rawId']})["_id"])
                    updateNum += res.modified_count
                    print(art['rawId'], 'update')
            num += 1
            # Swap in the display-friendly forms for the returned documents
            # (the DB keeps the ';'-joined string versions stored above).
            art['authors'] = displayAu
            art['category'] = displayCa
            paperDoc.append(art)

    print(f"{today} - 总共{num}篇，增加{addNum}篇，更新{updateNum}篇")
    return paperDoc

