# Target detail page on the Tianjin Ecology and Environment Bureau site.
url="http://sthj.tj.gov.cn/ZWGK4828/ZFXXGK8438/FDZDGK27/XZXKFWSXXZXKXX9079/202010/t20201021_3975593.html"
import sys
import os

# Make the local "creeper" package importable.  The original used the string
# "\creeper": "\c" is an invalid escape sequence (SyntaxWarning on Python
# 3.12+) and hard-codes the Windows separator; os.path.join is portable.
sys.path.append(os.path.join(os.getcwd(), "creeper"))

from os import path
import asyncio #异步模块
import aiohttp #异步请求模块
from aiohttp.client import request
import aiofiles #异步文件读写
import csv  # CSV 读写
from aiocsv import AsyncReader, AsyncWriter #异步CSV
from lxml import etree #Xpath解析模块
import pandas as pd
import os
import re
from Mylib import aiodownload_parsing, mkdir
from Mylib import aiodownload
from bs4 import BeautifulSoup

# Path configuration: output directory is <cwd>/data/Tianjin/<script basename>.
# os.path.join keeps the path portable instead of hard-coding "\\" separators.
# NOTE: avoid non-ASCII characters in this path (downstream tools may choke).
Folder_Path = os.path.join(
    os.getcwd(),
    "data",
    "Tianjin",
    os.path.basename(__file__).split('.')[0],
)

# Main routine
async def main():
    """Download the detail page, extract the MsoNormalTable text, and save it.

    Side effects: writes ``detail_page.html`` (via aiodownload) and
    ``detail_content.txt`` under ``Folder_Path``.

    Raises:
        ValueError: if no element with class ``MsoNormalTable`` is found.
    """
    # Single download — awaiting the coroutine directly replaces the
    # original one-task list + asyncio.wait, which added no concurrency.
    await aiodownload(
        url=url,
        f_path=Folder_Path,
        f_name="detail_page",
    )

    # aiodownload writes the page to disk; re-read it from there for parsing.
    html_path = os.path.join(Folder_Path, "detail_page.html")
    with open(html_path, mode="r", encoding="utf-8") as f:
        soup = BeautifulSoup(f.read(), "html.parser")

    # Guard the lookup: soup.find returns None when the class is absent,
    # which previously surfaced as an opaque AttributeError.
    table = soup.find(class_="MsoNormalTable")
    if table is None:
        raise ValueError("no element with class 'MsoNormalTable' in " + html_path)
    res = table.get_text()

    out_path = os.path.join(Folder_Path, "detail_content.txt")
    with open(out_path, mode="w", encoding="utf-8") as f:
        f.write(res)
        print("Complete!")
    print(res)

# Script entry point
if __name__ == '__main__':
    # asyncio.run() creates, runs, and closes the event loop; the old
    # get_event_loop()/run_until_complete() pattern is deprecated since 3.10.
    asyncio.run(main())