from f42.extract import extract,extract_all
from cache_get import get,PATH
from os.path import join
from collections import defaultdict
from urllib.parse import unquote
# Question-page URL template: filled with (question id, page number).
URL = "http://www.jisilu.cn/question/id-%s__sort-DESC__page-%s"

# Global tally: URL-decoded username -> number of pages the user appeared on.
count = defaultdict(int)

def parse(html):
    """Tally each distinct user linked from one question page.

    Extracts every username following a ``/people/`` profile link in
    *html* and increments the module-level ``count`` once per user.

    Usernames are URL-decoded *before* deduplication, so two
    differently percent-encoded forms of the same name count as a
    single appearance on this page (the original code unquoted after
    building the set, which could double-count).
    """
    raw_names = extract_all('"http://www.jisilu.cn/people/', '"', html)
    for name in {unquote(raw) for raw in raw_names}:
        count[name] += 1


def fetch():
    """Crawl all questions listed in id_list.txt and write user tallies.

    For each question id, fetches successive pages (via the cached
    ``get``) and lets ``parse`` accumulate username counts; pagination
    stops when the page's HTML no longer contains a link to the next
    page's URL.  Finally writes ``count`` to user_list.txt, sorted by
    ascending count, one "<count> <username>" line per user.

    Bug fixes vs. the original: ids are stripped of their trailing
    newline *before* the first URL is built (the original formatted
    page 1's URL with the raw, newline-bearing line), the builtin
    ``id`` is no longer shadowed, and the per-iteration re-strip is
    gone.
    """
    with open(join(PATH, "id_list.txt")) as id_file:
        ids = [line.strip() for line in id_file]

    total = len(ids)
    for pos, qid in enumerate(ids):
        page = 1
        while True:
            url = URL % (qid, page)
            print(pos, "%.2f%%" % (100 * float(pos) / total), qid, page)
            html = get(url)
            parse(html)
            page += 1
            # Keep paging only while the current page links to the next one.
            if URL % (qid, page) not in html:
                break

    with open(join(PATH, "user_list.txt"), "w") as f:
        for name, n in sorted(count.items(), key=lambda kv: kv[1]):
            f.write("%s %s\n" % (n, name))
        
fetch()
