#使用异步携程的方式,完成对环境部官网列表的爬取
import sys
import os
sys.path.append(os.getcwd()+"\creeper") 
import asyncio #异步模块
import aiohttp #异步请求模块
from aiohttp.client import request
import aiofiles #异步文件读写
import csv  # CSV 读写
from aiocsv import AsyncReader, AsyncWriter #异步CSV
from lxml import etree #Xpath解析模块

from Mylib import mkdir
import pandas as pd
import os
import re

Folder_Path = os.getcwd()+'\\data\\huanjingbu\\frag_huan'         # Full path of the folder holding per-page CSV fragments to merge; avoid non-ASCII characters in the path
SaveFile_Path =  os.getcwd()+'\\data\\huanjingbu\\save_huan'      # Directory where the merged CSV is saved
SaveFile_Name = r'环境部爬取目录.csv'              # File name of the merged CSV ("Ministry of Ecology crawl index")

# Base URL of the listing/search page (Ministry of Ecology and Environment)
url_base = "https://www.mee.gov.cn/govsearch/xxgkjs_new2019.jsp"

#异步协程
# Coroutine: fetch one listing page and persist the parsed rows as a CSV fragment.
async def aiodownload(url,number):
    """Download listing page *number* from *url* and write it to Folder_Path.

    The fragment file is named "<number>huanjingbu.csv" and contains one
    [date, title, document_number, link] row per listing entry, no header.
    """
    # Query-string arguments understood by the ministry's search endpoint;
    # "page" selects which listing page is returned.
    query = {
        "SType": "2",
        "tc": "1",
        "page": number,
        "orderby": "date",
    }
    async with aiohttp.ClientSession() as session:
        async with session.get(url, params=query) as resp:
            body = await resp.text()
    # Extract the listing rows from the raw HTML.
    rows = parsingData_regex(body)
    fragment_path = Folder_Path + '\\' + str(number) + "huanjingbu.csv"
    async with aiofiles.open(fragment_path, mode="w", encoding="utf-8", newline="") as f:
        await AsyncWriter(f, dialect="unix").writerows(rows)
    print("完成", number)

##使用xpath解析数据函数(网站估计删去了tr 这种方法不好用了)
def parsingData_xpath(text):

    #xpath 解析  
    html = etree.HTML(text)

    #获取信息表格
    table = html.xpath("//*[@class=\"iframe-list\"]/table")[0]
    trs = table.xpath("./tr")

    #处理每一个表格行信息
    content = []
    for tr in trs:
        time = tr.xpath("./td/span/text()")[0]
        txt = tr.xpath("./td[3]/text()")[0]
        title = tr.xpath("./td[2]/a/text()")[0]
        link = tr.xpath("./td[2]/a/@href")[0]
        row = [time,title,txt,"https://www.mee.gov.cn/xxgk2018"+ link[2:]]
        content.append(row)
        print(content)
    return content

#使用正则解析html函数
def parsingData_regex(text):
    pattern = "<td class=\"td-date\">.*?<span>(?P<time>.*?)</span>.*?</td>.*?<td>"  ".*?<a href=\"(?P<sublink>.*?)\".*?title=\"(?P<title>.*?)\">.*?</a>.*?</td>" ".*?<td class=\"td-right\">(?P<wenhao>.*?)</td>"
    object = re.compile(pattern, re.S)
    resultIter = object.finditer(text)
    content = []
    for item in resultIter:
        time = item.group("time")
        wenhao = item.group("wenhao")
        title = item.group("title")
        link = item.group("sublink")
        row = [time,title,wenhao,link]
        content.append(row)
    return content

#合并CSV函数
def gatherCSV():
    #修改当前工作目录
    os.chdir(Folder_Path)

    #将该文件夹下的所有文件名存入一个列表
    file_list = os.listdir()

    #读取第一个CSV文件并包含表头
    df = pd.read_csv(Folder_Path +'\\'+ file_list[0])   #编码默认UTF-8
 
    #将读取的第一个CSV文件写入合并后的文件保存
    df.to_csv(SaveFile_Path + '\\'+ SaveFile_Name,encoding="utf_8_sig",index=False)
 
    #循环遍历列表中各个CSV文件名，并追加到合并后的文件
    for i in range(1,len(file_list)):
        df = pd.read_csv(Folder_Path + '\\'+ file_list[i])
        df.to_csv(SaveFile_Path+'\\'+ SaveFile_Name,encoding="utf_8_sig",index=False, header=False, mode='a+')

#主函数
async def main():
    tasks = [asyncio.create_task(aiodownload(url_base,number)) for number in range(1,3)]
    await asyncio.wait(tasks)
    

#执行代码
# Script entry point.
if __name__ == '__main__':
    # Create the output directories before any coroutine tries to write.
    mkdir(Folder_Path)
    mkdir(SaveFile_Path)
    # asyncio.run replaces the get_event_loop()/run_until_complete() pair,
    # deprecated since Python 3.10.
    asyncio.run(main())
    gatherCSV()