import json
import re

from bs4 import BeautifulSoup

from crawler import Crawler


def crawler():
    """Crawl every report page for each pollution category and append the
    extracted row data to that category's output file.

    Loads request headers (session cookies) from ./cookie.json, then for
    each category POSTs one request per page and hands the response body
    to parse_data() for extraction and writing.
    """
    get_url = "http://10.100.244.12:8071/example/report/list.vm"

    with open('./cookie.json', 'r', encoding='utf8') as fp:
        headers = json.load(fp)

    # Renamed from `crawler`: the original local shadowed this function's
    # own name, which is confusing and blocks any re-entrant call.
    client = Crawler(get_url=get_url, headers=headers)

    # Each entry: server-side form id (`xmlname`), total page count, and
    # the file the extracted rows are appended to.
    types = [{
        'xmlname': 'TRWRFZZXXMJZXX',
        'pageNum': 961,
        'fileName': '土壤污染_data.txt'
    }, {
        'xmlname': 1611227966117,
        'pageNum': 2799,
        'fileName': '大气污染_data.txt'
    }, {
        'xmlname': 'NCHJZZZXXMXX',
        'pageNum': 961,
        'fileName': '农村污染_data.txt'
    }, {
        'xmlname': 1598591571992,
        'pageNum': 2248,
        'fileName': '水污染_data.txt'
    }]

    # Base POST form. 'pageNum' and 'xmlname' are placeholders here — both
    # are overwritten before every request (the original seeded them with
    # stale dead values, which read as if they mattered).
    data = {
        'pageNum': 1,
        'xmlname': '',
        'customquery': 'false',
        'customquerytype': 'default',
        'isdesignpatterns': 'false',
        'XMMC': '',
        'SBND': '',
        'SF_CN': ''
    }

    for type_item in types:
        print(type_item)
        file_name = './' + type_item['fileName']
        data['xmlname'] = type_item['xmlname']
        # NOTE(review): range(1, pageNum) stops at pageNum - 1 — confirm
        # whether the final page is intentionally skipped or 'pageNum'
        # already counts one past the last page.
        for page in range(1, type_item['pageNum']):
            data['pageNum'] = page
            res = client.post(data)
            parse_data(file_name, res.text)


def parse_data(file_name, res_text):
    """Extract the row data embedded in the page's inline script and append
    it to *file_name*.

    The page carries its table rows inside a <script> as
    ``var reportRowDataList = ...;`` — locate that assignment and write the
    right-hand side out as one line via write_text().
    """
    # Explicit parser: omitting it triggers bs4's GuessedAtParserWarning
    # and makes behavior depend on which parsers happen to be installed.
    soup = BeautifulSoup(res_text, 'html.parser')
    pattern = re.compile(r"var reportRowDataList = (.*?);")
    # 'string=' replaces the deprecated 'text=' keyword (bs4 >= 4.4).
    script = soup.find("script", string=pattern)
    if script is None:
        # Page without the expected script (e.g. expired session or error
        # page) — skip this page instead of crashing with AttributeError.
        return
    data_find = pattern.search(script.contents[0]).group(1)
    write_text(file_name, data_find)


def write_text(fileName, data):
    """Append *data* followed by a newline to the UTF-8 text file *fileName*."""
    with open(fileName, mode='a', encoding='utf-8') as out:
        out.write(data + '\n')


# Script entry point: run the full crawl when executed directly.
if __name__ == '__main__':
    crawler()
