import requests

from bs4 import BeautifulSoup
import  csv
from datetime import datetime



# Fetch a page and return its decoded HTML text (first step for every page).
def get_html_content(url):
    """Download *url* and return its HTML text, or None on any request failure.

    Sends a desktop-browser User-Agent so the target sites do not reject the
    request, and lets ``apparent_encoding`` pick the real charset, since the
    Chinese sites scraped here are not all UTF-8.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
    try:
        # FIX: a timeout is required — without one a stalled server blocks
        # the whole script indefinitely.
        response = requests.get(url, headers=headers, timeout=15)
        response.raise_for_status()
        # Re-decode with the charset requests sniffed from the body.
        response.encoding = response.apparent_encoding
        return response.text
    except requests.RequestException as e:
        # Caller treats None as "page unavailable" and skips this site.
        print(f"网络请求异常：{e}")
        return None

# Parse pages and extract the target data; the three sites are handled by
# parse_html_01, parse_html_02 and parse_html_03 respectively.
def parse_html_01(html_content):
    """Extract car-expo entries from an m.onezh.com listing page.

    For every ``<a class="mlcCar">`` result, prints the expo name, date,
    venue and absolute URL, and appends one row to the module-level CSV
    ``writer`` (created in ``__main__``).  Returns the list of matched tags.
    """
    base_url = 'https://m.onezh.com/'

    entries = BeautifulSoup(html_content, 'html.parser').find_all(
        'a', attrs={'class': 'mlcCar'})

    # One <a> tag per expo; pull the sub-elements out of its right-hand column.
    for entry in entries:
        name_hits = entry.select("div[class='mlccRight'] h2[class='mlccrTitle']")
        date_hits = entry.select("div[class='mlccRight'] p[class='mlccrTime']")
        venue_hits = entry.select("div[class='mlccRight'] span[class='mlccrpPrice']")

        expo_name = name_hits[0].string
        expo_date = date_hits[0].string
        expo_venue = venue_hits[0].string.strip()
        expo_url = base_url + entry['href']

        print("展会的名字：", expo_name)
        print("展会的日期：", expo_date)
        print("展会的开展地址：", expo_venue)
        print("展会的网页地址：", expo_url)

        # Blank columns keep the row aligned with the 14-column CSV header.
        writer.writerow(['', '', expo_name, '', expo_venue, expo_date,
                         '', '', '', '', '', expo_url, '', ''])

    return entries

def parse_html_02(html_content):
    """Extract car-expo entries from a shifair.com (世展网) search page.

    Prints name / date / URL for every result and appends one row to the
    module-level CSV ``writer`` (created in ``__main__``).  This site's
    listing carries no venue, so that column stays blank.  Returns the
    list of matched result ``<div>`` elements.
    """
    Shizhanwang_preurl = 'https://www.shifair.com/'
    soup = BeautifulSoup(html_content, 'html.parser')

    # 001 世展网 search results for keyword "汽车"
    carList = soup.find_all('div', attrs={'class': 'info_list_block_item'})
    # BUG FIX: bs4's ResultSet has no .len() method — the original
    # `carList.len()` raised AttributeError; use the builtin len().
    print(len(carList))
    for item in carList:
        the_name = item.select("div[class='info_list_block_item_info'] h2[class='info_list_block_item_info_title'] a")
        the_date = item.select("div[class='info_list_block_item_info'] div[class='info_list_block_item_info_time']")

        print("展会的名字：", the_name[0].string)
        print("展会的日期：", the_date[0].string)
        print("展会的开展地址：没有地址")
        print("展会的网页地址：", Shizhanwang_preurl + the_name[0]['href'])
        # Blank columns keep the row aligned with the 14-column CSV header.
        writer.writerow(['', '', the_name[0].string, '', '', the_date[0].string,
                         '', '', '', '', '',
                         Shizhanwang_preurl + the_name[0]['href'], '', ''])

    return carList

def parse_html_03(html_content):
    """Extract car-expo entries from a fair.china.cn (供应商展会网) search page.

    Prints the expo name, its raw info line (date/city/venue all packed in
    one string) and absolute URL, and appends one row to the module-level
    CSV ``writer`` (created in ``__main__``).  Returns the matched tags.
    """
    base_url = 'https://fair.china.cn'
    soup = BeautifulSoup(html_content, 'html.parser')

    # Supplier-expo results for keyword "汽车"; one <li class="fl-clr"> each.
    entries = soup.find_all('li', attrs={'class': 'fl-clr'})
    for entry in entries:
        title_hits = entry.select("div[class='ex-titlename fl-clr'] a[class='title-ex']")
        info_hits = entry.select("div[class='fl-left handle-hd'] p[class='detial-bd']")
        # NOTE: the info string packs everything together, e.g.
        # 举办日期：2023.11.01-03 | 城市：广州 | 举办场馆：… | 行业：… | …
        # A regex would be needed to split out the individual fields.

        expo_name = title_hits[0]['title']
        expo_info = info_hits[0].string
        expo_url = base_url + title_hits[0]['href']

        print("展会的名字：", expo_name)
        print("展会的日期：", expo_info)
        print("展会的开展地址：在info里面")
        print("展会的网页地址：", expo_url)
        # Blank columns keep the row aligned with the 14-column CSV header;
        # the whole info string goes into the "举办时间" column unparsed.
        writer.writerow(['', '', expo_name, '', '', expo_info,
                         '', '', '', '', '', expo_url, '', ''])

    return entries





# Driver: open the CSV, scrape the three sites, let `with` close the file.
if __name__ == '__main__':
    # 001 Create a timestamped CSV file to collect all expo rows.
    filename = datetime.now().strftime('车展信息汇总-%Y-%m-%d-%H%M.csv')
    # FIX: `with` guarantees the file is flushed and closed even when one of
    # the parsers raises; the original bare open()/close() leaked the handle
    # on any exception.  `writer` stays a module-level global here, which the
    # parse_html_* functions rely on.
    with open(filename, 'w', newline='', encoding='utf-8') as file:
        writer = csv.writer(file)
        writer.writerow(
            ['序号', '月份', '展会', '类别', '举办地', '举办时间', '举办展馆', '展品范围', '主办机构', '机构性质', '重要度',
             '展会网址', '说明', '统计'])

        # 002 The three sites are fetched and parsed identically, so drive
        # them from one (url, parser) table instead of three copied stanzas.
        targets = [
            # 全国第一展会网, keyword "汽车" (UTF-8 percent-encoded)
            ("https://m.onezh.com/zhanhui/1_0_0_0_0/0/%E6%B1%BD%E8%BD%A6", parse_html_01),
            # 世展网
            ("https://www.shifair.com/search/%E6%B1%BD%E8%BD%A6/1", parse_html_02),
            # 中国供应商展会中心, keyword "汽车" (GBK percent-encoded)
            ("https://fair.china.cn/search/%C6%FB%B3%B5.html", parse_html_03),
        ]
        for target_url, parse in targets:
            html_content = get_html_content(target_url)
            if html_content:
                result_list = parse(html_content)
            else:
                print("网页访问失败")
