from arxiv import Result
from meta_service.store import ArxivPaper
from meta_service.ucentre import CheckEntryIdType,ARXIVTYPEID
import arxiv
import pickle
from .utils import mkdir_if_not_exist
from datetime import datetime

DATESUFFIX = "000000"

def SplitWindow(start_date:str,end_date:str):
    """Split [start_date, end_date] into half-month windows.

    Both dates are 14-digit strings (YYYYMMDD followed by DATESUFFIX).
    The endpoints are swapped if given out of order. Window boundaries
    fall on the 1st and 15th of each month strictly between the two
    endpoints; returns a list of (left, right) 14-digit string pairs.
    """
    lo = int(start_date.removesuffix(DATESUFFIX))
    hi = int(end_date.removesuffix(DATESUFFIX))
    if hi < lo:
        lo, hi = hi, lo
    # Collect every 1st/15th that lies strictly between the endpoints.
    boundaries = [lo]
    boundaries.extend(
        candidate
        for year in range(lo // 10000, hi // 10000 + 1)
        for month in range(1, 13)
        for day in (1, 15)
        if lo < (candidate := year * 10000 + month * 100 + day) < hi
    )
    boundaries.append(hi)
    # Pair consecutive boundaries into (left, right) windows.
    return [
        (f"{left}{DATESUFFIX}", f"{right}{DATESUFFIX}")
        for left, right in zip(boundaries, boundaries[1:])
    ]

def Date(year:int=2024,month:int=10,day:int=1):
    """Render a calendar date as a 14-digit timestamp string.

    The year is used as-is; month and day are zero-padded to two
    digits; DATESUFFIX ("000000") is appended.
    """
    return f"{year}{month:02}{day:02}{DATESUFFIX}"

def Today():
    """Return the current local date as a 14-digit timestamp string via Date()."""
    today = datetime.now().date()
    return Date(today.year, today.month, today.day)

def __get_entry_id(r:Result):
    """Extract the bare arXiv entry id: the last path segment of the entry URL."""
    return str(str(r.entry_id).rsplit("/", 1)[-1])

def Result2Lancer(r:Result):
    """Persist an arxiv.Result as an ArxivPaper and return the reloaded record.

    Copies the relevant fields from the search result onto a fresh
    ArxivPaper, stores it, then reloads it from the DB to confirm the
    write landed. Returns None when the result is missing or any step
    of the convert/store/reload round-trip fails (errors are printed,
    not raised).
    """
    paper = ArxivPaper()
    if r is None:
        print("search entry online failed")
        return None
    try:
        entry_id = __get_entry_id(r)
        # Map the search-result fields onto the store model.
        paper.entry_id = entry_id
        paper.idtype = ARXIVTYPEID
        paper.title = r.title
        paper.summary = r.summary
        paper.comment = r.comment
        paper.site_url = r.entry_id
        paper.page_url = r.pdf_url
        paper.authors = [str(author.name) for author in r.authors]
        paper.categories = [str(cat) for cat in r.categories]
        paper.primary_category = r.primary_category
        paper.raw = dict(r._raw)
        paper.StoreToDB()
        # Reload from the DB and verify the round-trip preserved the id.
        paper = ArxivPaper(entry_id)
        if paper.entry_id != entry_id:
            raise Exception(f"entry_id: {entry_id} reload: {paper.entry_id} difference")
    except Exception as e:
        print(f"convert result to lancer error: {str(e)}")
        return None
    return paper


def AppRun(query:str="ti:hallucination OR abs:hallucination",max_results:int=1000):
    """Query arXiv and store every hit via Result2Lancer.

    Fetches up to max_results results for the given query (newest
    first) and attempts to persist each one. Returns a pair of sorted
    lists: the entry ids stored successfully, and the entry URLs of
    results that failed to convert/store.
    """
    # Build the API client.
    client = arxiv.Client()

    # For advanced query syntax documentation, see the arXiv API User Manual:
    # https://arxiv.org/help/api/user-manual#query_details
    search = arxiv.Search(
        query=query,
        max_results=max_results,
        sort_by=arxiv.SortCriterion.SubmittedDate,
        sort_order=arxiv.SortOrder.Descending,
    )

    print("searching...")

    hits = list(client.results(search))

    print(f"get results(count:{len(hits)}) ok")

    stored_ids = set()
    failed_ids = set()
    for hit in hits:
        if Result2Lancer(hit) is None:
            print(f"cannot load {hit.entry_id}")
            failed_ids.add(hit.entry_id)
        else:
            stored_ids.add(__get_entry_id(hit))

    return sorted(stored_ids), sorted(failed_ids)

