import asyncio
import csv
import os
import re

from lxml import etree
import aiohttp


async def classify_url():
    """Crawl the category index page, then fetch the first 5 result pages
    of every category concurrently; each finished fetch is written to CSV
    by the ``save_data`` done-callback.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(
                'http://27.223.1.57:10000/PythonApplication/index.aspx?oneClassGuid=171030103404382666') as r:
            text = await r.text()
            # xpath + regex: each category row carries its id inside the
            # quoted argument of the <tr onclick="..."> handler.
            a = etree.HTML(text)
            url_list = [re.search(r"'(.*)'", onclick).group(1) for onclick in
                        a.xpath('//table[@class="twoNavigationborder"]//tr/@onclick')]
            # One task per (category, page) pair, save callback attached.
            tasks = []
            for guid in url_list:
                base_url = 'http://27.223.1.57:10000/PythonApplication/webbasesite/dataInfoList.aspx?oneClassGuid=' + guid
                for page in range(1, 6):
                    # Page 1 is served without an explicit page-number parameter.
                    page_url = base_url if page == 1 else base_url + '&lkocok_pageNo=' + str(page)
                    task = asyncio.create_task(one_classify(page_url))
                    task.add_done_callback(save_data)
                    tasks.append(task)
            # gather (unlike asyncio.wait) re-raises task exceptions instead
            # of silently discarding them.
            await asyncio.gather(*tasks)


async def one_classify(url):
    """Download a single result page and return its body as text."""
    async with aiohttp.ClientSession() as http:
        async with http.get(url) as resp:
            body = await resp.text()
            return body


def save_data(task_obj):
    """Done-callback for a page-fetch task: parse the HTML result and
    append one CSV row per listed item under ./青岛问政/<category>.csv.

    task_obj -- a finished asyncio.Task whose result() is the page HTML.
    """
    page = etree.HTML(task_obj.result())
    # Category name: the second run of CJK characters in the breadcrumb cell.
    classify_name = re.findall(r'[\u4E00-\u9FA5]+', page.xpath('//td[@class=\'rightoneNavigation\']/text()')[1])[1]
    # The first <td> text of each column is a header row — drop it with a
    # [1:] slice, which (unlike pop(0)) is safe when the xpath matched nothing.
    numbers = page.xpath('//table[@class="tt gray12_25"]//tr/td[1]/text()')[1:]
    titles = page.xpath('//table[@class="tt gray12_25"]//tr/td[2]/@title')
    times1 = page.xpath('//table[@class="tt gray12_25"]//tr/td[3]/text()')[1:]
    times2 = page.xpath('//table[@class="tt gray12_25"]//tr/td[4]/text()')[1:]
    data_list = [
        {'number': n, 'title': t, 'time1': t1, 'time2': t2}
        for n, t, t1, t2 in zip(numbers, titles, times1, times2)
    ]

    # Create the output directory if it does not exist yet.
    download_path = os.path.join(os.getcwd(), '青岛问政')
    os.makedirs(download_path, exist_ok=True)

    # newline='' is required by the csv module; without it Windows gets an
    # extra blank line after every row.
    with open(os.path.join(download_path, '%s.csv' % classify_name), 'a',
              encoding='utf-8', newline='') as f:
        print('正在存贮....' + classify_name)
        writer = csv.DictWriter(f, ['number', 'title', 'time1', 'time2'])
        writer.writerows(data_list)


if __name__ == '__main__':
    # Entry point: run the whole crawl-and-save pipeline to completion.
    asyncio.run(classify_url())
