# Crawl the second-level (detail) pages listed in the index CSV, parse them, and save the results.
import sys
import os

# Make the local "creeper" package importable when the file is run as a script.
# NOTE: the original used "\creeper" — "\c" is an invalid escape sequence that
# only works by accident and raises a SyntaxWarning on modern Python; build the
# path with os.path.join instead.
sys.path.append(os.path.join(os.getcwd(), "creeper"))

import asyncio
from asyncio.windows_events import IocpProactor  # Windows-only proactor machinery (imported but unused)
import csv
import json
import re

import aiohttp
import aiofiles
from aiocsv import AsyncReader, AsyncWriter, AsyncDictWriter
from lxml import etree

from E_entity import E_entity
from Mylib import mkdir

# Output locations (the script targets Windows; os.path.join picks the right separator).
Folder_Path = os.path.join(os.getcwd(), 'data', 'huanjingbu', 'details')      # per-article detail pages (HTML + JSON)
SaveFile_Path = os.path.join(os.getcwd(), 'data', 'huanjingbu', 'save_huan')  # folder holding the merged index CSV
SaveFile_Name = '环境部爬取目录.csv'  # merged index file name ("Ministry of Environment crawl index")

# Async coroutine: fetch one detail page, persist the raw HTML, parse it, and
# save the extracted fields as JSON.  `number` builds unique per-row file names.
async def aiodownload(url, number):
    print("===aiodownload starts===")
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            tempHtml = await resp.text()
            print(type(tempHtml))

            # Persist the raw HTML.  Use aiofiles (already imported) so the
            # blocking disk write does not stall the event loop while other
            # downloads are in flight — the original used synchronous open().
            print("=== Store the html ===")
            async with aiofiles.open(Folder_Path + '\\' + str(number) + "zihtml.html",
                                     mode="w", encoding="utf-8") as f:
                await f.write(tempHtml)
            print("=== Finish store html ===")

            # Extract the structured metadata + body text from the page.
            entity = parsingData(tempHtml)

            # Save the parsed record next to the HTML as <number>ziyemian.json.
            async with aiofiles.open(Folder_Path + '\\' + str(number) + "ziyemian.json",
                                     mode="w", encoding="utf-8") as f:
                await f.write(json.dumps(entity.dict, ensure_ascii=False))

    print("===aiodownload complete", number, "===")

# First-pass parse of a detail page: pull the metadata fields with regexes and
# the article body with XPath, then wrap everything in an E_entity.
def parsingData(text):
    # Example metadata markup this targets:
    # <div><span>发布机关</span><i style="float: left;">国家核安全局</i></div>
    # <div><span>发布机关</span><div style="width:auto;">生态环境部<br>商务部<br>科学技术部</div></div>
    header = []
    print("=====Entering parsing data =====")
    # Raw strings throughout; '.' before pdf/doc is escaped so the extension
    # matches literally (the original '.*?.pdf' also matched e.g. 'xpdf').
    p_title = r'<div><span style="float: left;">名　　称</span><p style="font-size: 16px;float: left;width: 87.2%;">(?P<title>.*?)</p></div>'
    p_index = r'<div><span>索 引 号</span>(?P<index_num>.*?)</div>'
    p_classify = r'<div><span>分　　类</span>(?P<classify>.*?)</div>'
    p_publisher = r"<div><span>发布机关</span>.*?([\u4E00-\u9FA5]+).*?</div>"
    p_gen_date = r'<div><span>生成日期</span>(?P<gen_date>.*?)</div>'
    p_approval_number = r'<div><span>文　　号</span>(?P<approval_number>.*?)</div>'
    p_theme = r'<div><span>主 题 词</span>(?P<theme>.*?)</div>'
    p_attach_pdf = r'oldsrc="(.*?\.pdf)"'
    p_attach_doc = r'oldsrc="(.*?\.doc)"'
    pattern_list = [p_title, p_index, p_classify, p_publisher, p_gen_date,
                    p_approval_number, p_theme, p_attach_pdf, p_attach_doc]
    print("=== compile pattern ===")
    for pat in pattern_list:
        # Run findall once (the original ran it twice per pattern) and avoid
        # shadowing the builtin name `object`.
        matches = re.compile(pat, re.S).findall(text)
        if matches:
            header.append(matches)
            print(matches)
        else:
            # Empty string is the sentinel for "field not found on this page".
            header.append("")

    print("=== using xpath parsing ===")
    # The article body lives under div.Custom_UnionStyle; collect every text node.
    html = etree.HTML(text)
    content_arr = html.xpath('//div[@class="Custom_UnionStyle"]//text()')
    # Trim leading/trailing spaces from each fragment and join in one pass
    # (the original built the string with quadratic `+` concatenation).
    content = ''.join(i.strip(' ') for i in content_arr)

    print("content", content)
    entity = E_entity(header=header, content=content)
    print("=== Ends parsing data, return json object ===")
    print("dictionary:", entity.dict)
    return entity
    
# Read the merged index CSV and schedule one download task per row
# (column index 3 holds the detail-page URL).
async def main():
    row_number = 0
    tasks = []
    async with aiofiles.open(SaveFile_Path + '\\' + SaveFile_Name, mode="r",
                             encoding="utf-8", newline="") as afp:
        async for row in AsyncReader(afp):
            row_number += 1
            print("=== append a new task ===")
            tasks.append(asyncio.create_task(aiodownload(row[3], row_number)))
    # asyncio.wait() raises ValueError on an empty set — guard against an empty
    # CSV.  return_exceptions=True keeps wait()'s behavior of not propagating
    # per-task exceptions out of main().
    if tasks:
        await asyncio.gather(*tasks, return_exceptions=True)
        
# Script entry point: ensure the output directories exist, then run the crawl.
if __name__ == '__main__':
    mkdir(Folder_Path)
    mkdir(SaveFile_Path)
    # asyncio.run() creates and closes a fresh event loop; get_event_loop() +
    # run_until_complete is the deprecated pre-3.10 pattern.
    asyncio.run(main())