import json
import time
import requests

from bs4 import BeautifulSoup
from tools import FileCache, random_ua
from urllib import parse

from playwright.sync_api import sync_playwright, TimeoutError

# Today's date; the year part selects which annual dataset to scrape and
# the YYYY-MM part namespaces the on-disk caches below.
datetime_string = time.strftime('%Y-%m-%d')

# The statistics bureau publishes one snapshot per year; scrape last
# year's edition (the most recent complete one).
base_url = f'http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/{int(datetime_string[:4]) - 1}/'
# Persistent set of page URLs already fully scraped (resume support).
file_cache = FileCache(filename=f'temp/_temp_.{datetime_string[:7]}', init_value=set())

# NOTE: module-level side effect — a real (non-headless) Chromium window is
# launched at import time; presumably headless=False helps pass the site's
# anti-bot checks — TODO confirm.
play_ins = sync_playwright().start()
browser = play_ins.chromium.launch(headless=False, timeout=10000)
page = browser.new_page()
# Deliberately short navigation timeout (1.2 s): parse_sub_page() catches
# TimeoutError and retries, so slow responses fail fast instead of hanging.
page.set_default_navigation_timeout(1200)


def parse_sub_page(url, level, parent_code=''):
    """Recursively scrape one administrative-division listing page.

    Args:
        url: absolute URL of the listing page; '' (leaf rows) short-circuits
            the fetch and returns [].
        level: depth assigned to the rows on this page (provinces' children
            start at 2).
        parent_code: area code of the division linking here; when empty the
            parent is derived from the first two digits of the row's own
            code, right-padded with zeros to 12 characters.

    Returns:
        list[dict]: one dict per table row with keys ``area_code``,
        ``area_level``, ``name``, ``parent_code``, ``href`` and
        ``children`` (the recursive result, [] for leaves).

    Raises:
        TimeoutError: when navigation fails five times in a row.
    """
    page_data = []

    # Skip leaf rows (empty url) and pages finished in a previous run.
    # NOTE(review): a cached url returns [] here, so resuming only recovers
    # data if the caller also persisted the previously scraped children.
    if url and url not in file_cache:
        try:
            # Retry navigation up to 5 times; the short module-level
            # navigation timeout keeps each failed attempt cheap.
            error_count = 0
            while error_count < 5:
                try:
                    page.goto(url)
                    page.wait_for_timeout(600)
                    break
                except TimeoutError:
                    page.wait_for_timeout(600)
                    error_count += 1
                    continue
            else:
                # All retries exhausted — ask the operator to try later.
                raise TimeoutError('请等待一段时间，再次运行。')
            resp = page.content()
            bs_ins = BeautifulSoup(resp, 'lxml')
            # Every row except the header; first cell = code, last = name.
            tr_list = bs_ins.select('table[class] tr:not(:first-of-type)')
            for tr in tr_list:
                sub_area_children_page_href = ''
                area_code_el = tr.select_one('td:first-of-type')
                area_name_el = tr.select_one('td:last-of-type')
                if area_code_el.select('a[href]'):
                    # Non-leaf rows wrap both cells in links to the
                    # sub-page.  (Fix: the cells are themselves <td>s, so
                    # the selector is just 'a[href]', not 'td a[href]'.)
                    area_code_el = area_code_el.select_one('a[href]')
                    area_name_el = area_name_el.select_one('a[href]')
                    sub_area_children_page_href = parse.urljoin(url, area_code_el.attrs['href'])

                sub_area_code = area_code_el.text
                sub_area_name = area_name_el.text

                sub_item_data = dict()
                sub_item_data['area_code'] = sub_area_code
                sub_item_data['area_level'] = level
                sub_item_data['name'] = sub_area_name
                # Provinces carry no explicit parent; derive one from the
                # first two digits of their own code, padded to 12 chars.
                sub_item_data['parent_code'] = parent_code or sub_area_code[:2].ljust(12, '0')
                sub_item_data['href'] = sub_area_children_page_href
                sub_item_data['children'] = parse_sub_page(
                    sub_area_children_page_href,
                    level + 1,
                    sub_area_code,
                )
                page_data.append(sub_item_data)

            # Fix: mark the url as done once, after all rows succeeded,
            # instead of re-adding it on every loop iteration.  The
            # `if tr_list` guard preserves the original behavior of not
            # caching pages that yielded no rows (likely a blocked fetch).
            if tr_list:
                file_cache.add(url)
        except Exception as e:
            print(time.strftime('%H:%M:%S'), f'出现错误:{e}', f'请求地址: {url}')
            # Undo any partial cache entry so a re-run re-fetches this page.
            file_cache.discard(url)
            raise e
    return page_data


def master():
    """Scrape the province index and dump the full division tree to JSON.

    Provinces already present in the on-disk FileCache are skipped, so an
    interrupted run can be resumed.  The final tree is written to
    ``china_area_code.json`` (UTF-8, non-ASCII characters kept readable).
    """
    cur_level = 1
    resp = requests.get(base_url)
    bs_ins = BeautifulSoup(resp.content, 'lxml')
    with FileCache(f"temp/area_code_data.{datetime_string[:7]}", init_value={}) as china_area_data:
        for province_el in bs_ins.select('table[class] a[href]'):
            # Province-level entry: the link text is the province name.
            area_name = province_el.text
            # Build the absolute URL of the province's sub-page.
            area_children_page_href = parse.urljoin(base_url, province_el.attrs['href'])

            # Resume support: skip provinces finished in a previous run.
            if area_name in china_area_data:
                continue

            print(area_name)
            province_el_data = dict()
            province_el_data['area_level'] = cur_level
            province_el_data['name'] = area_name
            province_el_data['parent_code'] = ''
            province_el_data['href'] = area_children_page_href
            province_el_data['children'] = parse_sub_page(area_children_page_href, cur_level + 1)
            # The index page does not expose province codes; recover it from
            # the first child's derived parent_code.
            # NOTE(review): raises IndexError when children is empty (e.g.
            # the sub-page was cached but its data lost) — confirm intended.
            province_el_data['area_code'] = province_el_data['children'][0]['parent_code']

            # One complete province record.
            china_area_data[area_name] = province_el_data

        # Fix: close the output file deterministically — the original passed
        # an open() handle to json.dump and never closed it.
        with open('china_area_code.json', mode='wt', encoding='utf-8') as fp:
            json.dump(china_area_data.data, fp, ensure_ascii=False)


if __name__ == '__main__':
    # Script entry point; note the Playwright browser launched at import
    # time above is never explicitly closed here.
    master()
