import logging
import os

import lxml.html
import requests

logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')

# Root of the NBS (National Bureau of Statistics of China) 2020
# administrative-division-code pages; all crawled URLs are relative to it.
BASE_URL = 'http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2020/'
# Administrative levels, indexed by the recursion depth used in fetch_list().
LEVELS = ['province', 'city', 'county', 'town', 'village']
# Local HTML cache directory ('/tmp/' is a handy alternative during development).
CACHE_DIR = '/var/cache/areasproject/'


def fetch_html(url):
    """Fetch *url* (relative to BASE_URL) and return the page as bytes.

    Responses are cached on disk under CACHE_DIR so repeated runs do not
    re-download pages.  Raw bytes are returned in both the cached and the
    fresh-fetch case (the original returned a lossily gb2312-decoded str on
    cache hits); lxml detects the charset from the HTML itself.

    Network errors are retried indefinitely (best-effort crawl); any other
    error (e.g. an unwritable cache directory) propagates instead of
    spinning forever.
    """
    full_url = BASE_URL + url
    cachefile = CACHE_DIR + 'stats-gov-cn-' + url.replace('/', '-')
    logging.info(f'fetch: {full_url} ... {cachefile}')
    if os.path.exists(cachefile):
        with open(cachefile, 'rb') as f:
            return f.read()
    # Make sure the cache directory exists before the first write.
    os.makedirs(CACHE_DIR, exist_ok=True)
    while True:
        try:
            logging.info(f'fetch (real): {full_url} ... {cachefile}')
            html = requests.get(full_url, timeout=3).content
        except requests.RequestException as e:
            # Transient network failure: log and retry.
            logging.warning(e)
            continue
        with open(cachefile, 'wb') as f:
            f.write(html)
        return html


def parse_list(html, base_url='', tr='citytr'):
    """Parse a division-listing page and yield one dict per table row.

    Each dict has 'name', 'code' and 'url' keys ('url' is '' when the row
    links nowhere); three-column rows additionally carry 'cate_code', the
    urban-rural classification code.
    """
    logging.info(f'parse: {base_url=}, {tr=}')
    tree = lxml.html.fromstring(html)
    for row in tree.cssselect(f'tr.{tr}'):
        tds = row.cssselect('td')
        if len(tds) == 2:
            # Normal case: both cells wrap their text in an <a>; leaf rows
            # without links fall back to the bare <td> cells.
            anchors = row.cssselect('td a')
            code_ele, name_ele = anchors if anchors else tds
            href = name_ele.get('href')
            yield {
                'name': name_ele.text_content(),
                'code': code_ele.text_content(),
                'url': (base_url + href) if href else '',
            }
        else:
            # Village rows have three cells: code, urban-rural
            # classification code, and name; they never link further down.
            code_ele, cate_code_ele, name_ele = tds
            yield {
                'name': name_ele.text_content(),
                'code': code_ele.text_content(),
                'url': '',
                'cate_code': cate_code_ele.text_content(),
            }


def fetch_list(url, level=1):
    """Fetch one listing page and recurse into every linked sub-division.

    *level* indexes LEVELS and selects the tr-class to parse; rows whose
    'url' is empty (leaves) end the recursion.
    """
    level_name = LEVELS[level]
    html = fetch_html(url)
    parent = os.path.dirname(url)
    prefix = parent + '/' if parent else ''
    for entry in parse_list(html, prefix, tr=f'{level_name}tr'):
        logging.info(entry)
        if entry['url']:
            fetch_list(entry['url'], level + 1)


def main():
    """Crawl the whole 2020 division-code tree, starting from the index page.

    Province rows are special-cased here: their cells are bare <a> links
    rather than the code/name <td> pairs parse_list() expects, so they are
    extracted directly and each one seeds a recursive fetch_list() crawl.
    """
    start_html = fetch_html('index.html')
    start_doc = lxml.html.fromstring(start_html)
    for element in start_doc.cssselect('tr.provincetr > td > a'):
        url = element.get('href')
        name = element.text
        logging.info([name, url])
        fetch_list(url, level=1)


# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
