from requests import get as _get, post 
import requests_cache
from f42.extract import extract,extract_all
from os.path import dirname,abspath,join 
import traceback

# Directory containing this script; the HTTP cache and output files live beside it.
PATH = abspath(dirname(__file__))
# Transparently cache every HTTP response in PATH/cache so re-runs
# do not re-download pages already fetched.
requests_cache.install_cache(join(PATH, "cache"))

def get(url, retries=3, timeout=30):
    """Fetch *url* and return its body decoded as UTF-8 (bad bytes ignored).

    Retries up to *retries* times on any request failure, printing the
    traceback each time.  Returns "" when every attempt fails.

    :param url: URL to fetch.
    :param retries: maximum number of attempts (default 3, as before).
    :param timeout: per-request timeout in seconds (default 30, as before).
    :return: decoded response body, or "" on total failure.
    """
    for _ in range(retries):
        try:
            # Return directly on success instead of break-and-fall-through.
            return _get(url, timeout=timeout).content.decode("utf-8", "ignore")
        except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
            traceback.print_exc()
    return ""

# Listing-page URL template; %s is substituted with the 1-based page number.
URL = "http://www.jisilu.cn/home/explore/sort_type-new__day-0__page-%s"

def fetch():
    """Crawl the jisilu listing pages and write all question ids to id_list.txt.

    Walks the paginated listing starting at page 1, extracts question ids
    from each page's question links, de-duplicates them, and writes each
    new id on its own line to PATH/id_list.txt.  Stops once the next page
    number no longer appears among the page's pagination links.

    Side effects: truncates and rewrites PATH/id_list.txt; prints
    (page, total ids seen) progress per page.
    """
    page = 1
    exist = set()
    # NOTE: the original also wrote out `exist` here, but it is always
    # empty at this point, so that dead loop has been removed.
    with open(join(PATH, "id_list.txt"), "w") as id_list:
        while True:
            html = get(URL % page)
            # Pagination tokens advertised by this page; used to decide when to stop.
            page_list = extract_all(
                'sort_type-new__day-0__page-',
                '"',
                html,
            )

            for token in extract_all("http://www.jisilu.cn/question/", '"', html):
                # Links can look like ".../question/<slug>-<id>_..."; isolate the id.
                if "-" in token:
                    token = token.split("-")[1].split("_")[0]
                if not token.isdigit():
                    continue
                qid = int(token)
                if qid not in exist:
                    exist.add(qid)
                    id_list.write(str(qid) + "\n")
            # Flush every 10 pages so progress survives a crash
            # (hoisted out of the per-id loop: the condition only depends on `page`).
            if not page % 10:
                id_list.flush()
            print(page, len(exist))

            page += 1
            # Guard against non-numeric pagination tokens (the original int()
            # conversion would raise ValueError) and use a set for membership.
            known_pages = {int(p) for p in page_list if p.isdigit()}
            if page not in known_pages:
                break



# Only run the crawler when executed as a script, not on import.
if __name__ == "__main__":
    fetch()
