# Asynchronous crawl (asyncio coroutines) of the site's first-level listing
# pages, fetched with HTTP GET requests.
import sys 
import os
# Make the local "creeper" package importable.
# NOTE: the original used os.getcwd()+"\creeper" — "\c" is an invalid escape
# sequence (SyntaxWarning on modern Python) and backslash separators are
# Windows-only; os.path.join is correct on every platform.
sys.path.append(os.path.join(os.getcwd(), "creeper"))
from Mylib import aiodownload,mkdir, parsingData_regex,aiodownload_parsing

import asyncio #async framework
import aiohttp #async HTTP client
from aiohttp.client import request
import aiofiles #async file I/O
import csv  # CSV read/write
from aiocsv import AsyncReader, AsyncWriter #async CSV read/write
from lxml import etree #XPath/HTML parsing

import pandas as pd
import os

# Path configuration: all artifacts live under <cwd>/data/<script-name>/.
# os.path.join keeps separators correct on every platform (the original
# hand-built "\\"-joined strings, which only work on Windows), and
# os.path.splitext strips exactly the extension instead of truncating the
# file name at its first dot. Keep the path free of non-ASCII characters.
Folder_Path = os.path.join(
    os.getcwd(), "data",
    os.path.splitext(os.path.basename(__file__))[0])   # per-script data directory
SaveFile_Path = os.path.join(Folder_Path, "Catalog")   # directory for the merged catalog
SaveFile_Name = r'目录.csv'                             # merged CSV file name ("catalog.csv")

#主函数
async def main():
    url_base = "http://sthj.tj.gov.cn/ZWFW4519/JSXMHPXX3381/HPSPJDGG3900/"
    tasks = [asyncio.create_task(aiodownload(url_base,f_path = Folder_Path, f_name = "0catalog"))]
    #HTML 抓取部分
    for number in range(1,3):
        tasks.append(asyncio.create_task(aiodownload_parsing(
            url= url_base+ "index_" + str(number) + ".html", 
            f_path = Folder_Path,
            f_name = str(number) +"catalog",
            pattern=  "<a class=\"fl\" href='(?P<sublink>.*?)'.*?title='(?P<title>.*?)'",
            g_list=["title","sublink"]   ,
            filepath= SaveFile_Path,
            csv_name= SaveFile_Name)))
    await asyncio.wait(tasks)


#执行代码 (script entry point)
if __name__ == '__main__':
    # asyncio.run() replaces the deprecated get_event_loop() /
    # run_until_complete pattern (get_event_loop() emits a
    # DeprecationWarning on Python 3.10+ when no loop is running) and
    # guarantees the event loop is properly closed on exit.
    asyncio.run(main())