import requests
from bs4 import BeautifulSoup
import  csv
from datetime import datetime
import pandas as pd
import numpy as np
from bs4.element import TemplateString
# Handle web3 schedule info (kept for reference)
# web3time = web3DF['举办时间']
# display in full

import datetime
import time
from datetime import datetime
from datetime import timedelta
from dateutil.relativedelta import relativedelta

# Fetch a web page and return its response content (the first step for every page)
def get_html_content(url):
    """Fetch *url* and return its HTML text, or None on any request failure.

    A browser-like User-Agent header is sent so target sites do not reject
    the request as an obvious bot.

    Parameters:
        url: absolute URL of the page to fetch.

    Returns:
        The decoded HTML body as a string, or None if the request failed
        or returned a non-2xx status.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
    try:
        # timeout prevents the whole scrape from hanging forever on a dead host
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        # let requests sniff the real charset; many Chinese sites mislabel it
        response.encoding = response.apparent_encoding
        return response.text
    except requests.RequestException as e:
        print(f"网络请求异常：{e}")
        return None

# Parser for the Maigoo listing site
def parse_html_01(html_content):
    """Parse the Maigoo car-exhibition listing, follow each detail page, and
    write one CSV row per exhibition via the module-level ``writer``.

    Parameters:
        html_content: HTML text of the Maigoo listing page.

    Returns:
        The list of matched ``<tr class="li font14">`` elements, so the
        caller can tell how many items were processed (may be empty).
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    carList = soup.find_all('tr', attrs={'class': 'li font14'})
    if not carList:
        # Nothing matched — the page layout may have changed; avoid the
        # IndexError the old debug print would raise on an empty list.
        return carList
    print(carList[0])
    # Walk the listing rows and emit one CSV row per exhibition.
    for item_a in carList:
        the_td = item_a.select("td")

        the_name = the_td[1].string
        the_date = the_td[2].string
        the_local = the_td[3].string
        the_url = the_td[1].select("a")[0]['href']

        # Per-iteration defaults. Previously these two variables were only
        # assigned inside the detail-page branch, so a failed fetch caused a
        # NameError on the first row or reused stale values from the
        # previous iteration on later rows.
        the_new_name = the_name
        the_new_url = None

        # Scrape the second-level (detail) page for the official site URL.
        html_content_inside = get_html_content(the_url)
        if html_content_inside:
            soup_inside = BeautifulSoup(html_content_inside, 'html.parser')

            the_info_all = soup_inside.find_all('div', attrs={'class': 'infobox'})
            if the_info_all:
                the_new_name = the_info_all[0].select("div[class='name font24 dhidden b']")[0].string
                the_li_list = the_info_all[0].select("ul[class='font16'] li")
                links = the_li_list[-1].select("a")
                if links:
                    the_new_url = links[0]['href']

            print("二级页面的名字：", the_new_name, "正确的官网网址：", the_new_url)

        # Some detail pages do not list an official website; fall back to
        # the first-level listing URL in that case.
        if the_new_url:
            writer.writerow(['', '', the_new_name, '', '', the_date, the_local, '', '', '', '', the_new_url, '', ''])
        else:
            writer.writerow(['', '', the_new_name, '', '', the_date, the_local, '', '', '', '', the_url, '', ''])

    return carList






# Drive the workflow: call the functions above to complete the scrape (main entry point)
if __name__ == '__main__':

    # 001 Create a timestamped CSV file for the scraped data and write the
    # header row. The context manager guarantees the file is closed (and
    # buffered rows flushed) even if the scrape raises midway — the old
    # open()/close() pair leaked the handle on any exception in between.
    filename = datetime.now().strftime('车展信息汇总-%Y-%m-%d-%H%M')
    with open(filename + '.csv', 'w', newline='', encoding='utf-8') as file:
        # Module-level name: parse_html_01 writes rows through this writer.
        writer = csv.writer(file)
        writer.writerow(
            ['序号', '月份', '展会', '类别', '举办地', '举办时间', '举办展馆', '展品范围', '主办机构', '机构性质', '重要度',
             '展会网址', '说明', '统计'])

        # 002 Fetch and parse the Maigoo listing page.
        target_url_01 = 'https://www.maigoo.com/news/665605.html'
        print('进入网址：', target_url_01)
        html_content_01 = get_html_content(target_url_01)
        if html_content_01:
            result_list = parse_html_01(html_content_01)
        else:
            print("网页访问失败")

    # 003 CSV file is closed automatically by the with-block.


