import requests

from bs4 import BeautifulSoup
import  csv
from datetime import datetime
import pandas as pd
import numpy as np
from bs4.element import TemplateString
# 处理web3时间等信息
# web3time = web3DF['举办时间']
# 完整显示

import datetime
import time
from datetime import datetime
from datetime import timedelta
from dateutil.relativedelta import relativedelta

#访问网页并获取响应内容(每个网页都要做的第一件事)
# Fetch a page and return its text — the first step for every site crawled here.
def get_html_content(url, timeout=10):
    """Fetch *url* and return the decoded HTML text, or ``None`` on failure.

    A browser-like User-Agent is sent because these exhibition sites block
    the default ``requests`` UA.  ``timeout`` (seconds, new parameter with a
    backward-compatible default) prevents the crawler from hanging forever
    on an unresponsive host — previously no timeout was set at all.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
    try:
        response = requests.get(url, headers=headers, timeout=timeout)
        response.raise_for_status()  # turn 4xx/5xx into an exception
        # Guess the real encoding from the body; the sites mix GBK and UTF-8.
        response.encoding = response.apparent_encoding
        return response.text
    except Exception as e:
        # Best-effort crawler: report the failure and let the caller skip
        # this URL instead of aborting the whole run.
        print(f"网络请求异常：{e}")
        return None

#解析网页并提取目标数据，有三个网页，分别用函数parse_html_01、parse_html_02、parse_html_03来针对性爬取信息
# 全国第一展会网
# Site 01: onezh.com ("全国第一展会网") listing pages.
def parse_html_01(html_content):
    """Parse a onezh.com listing page and append one CSV row per fair.

    Writes through the module-level ``writer`` opened in ``__main__``.
    Returns the raw list of matched ``div.row`` tags.
    """
    Quanguozhanhui_preurl = 'https://m.onezh.com/'
    soup = BeautifulSoup(html_content, 'html.parser')
    carList = soup.find_all('div', attrs={'class': 'row'})
    for item_a in carList:
        the_name = item_a.select("a")
        the_date = item_a.select("div[class='info'] div[class='cont'] em[class='cgree1']")
        # Skip rows that lack the expected name/date structure instead of
        # crashing the whole crawl with an IndexError (previously unguarded;
        # an unused venue lookup was also removed).
        if not the_name or len(the_date) < 2:
            continue
        print('全国第一展会网', the_name[0]['title'])
        # Column layout matches the CSV header written in __main__.
        writer.writerow(['', '', the_name[0]['title'], '', '',
                         the_date[1].get_text(strip=True), '', '', '', '', '',
                         Quanguozhanhui_preurl + the_name[0]['href'], '', ''])
    return carList

# 02世展网
def parse_html_02(html_content):
    # Shizhanwang_preurl = 'https://www.shifair.com'
    soup = BeautifulSoup(html_content, 'html.parser')

    # 001 世展网的提取 关键词“汽车”,爬取五页，并且需要爬二级页面
    carList = soup.find_all('div', attrs={'class': 'info_list_block_item'})

    for item in carList:
        the_name = item.select("div[class='info_list_block_item_info'] h1[class='info_list_block_item_info_title'] a")
        the_date = item.select("div[class='info_list_block_item_info'] div[class='info_list_block_item_info_time']")

        #        根据车展的url地址，进入到当前车展的具体信息页面进行爬取另外的信息：
        target_inside = the_name[0]['href']
        html_content_inside = get_html_content(target_inside)
        if html_content_inside:
            soup_inside = BeautifulSoup(html_content_inside, 'html.parser')
            the_info = soup_inside.find_all('div', attrs={'class': 'exhibition_article_newbanner_info'})
            p_list = the_info[0].select("div[class='exhibition_top_info_des exhibition_top_newinfo_des'] p")
            the_local = p_list[1].select("span")[0].get_text(strip=True)
            the_master = p_list[3].select("span")[0].string
        print('世展网', the_name[0].string)
#     写入CSV
        writer.writerow(['','',the_name[0].string,'',the_local,the_date[0].string,'','',the_master,'','',the_name[0]['href'],'',''])

    return carList

# 03 中国供应商网
# Site 03: fair.china.cn ("中国供应商网") search-result pages.
def parse_html_03(html_content):
    """Parse a fair.china.cn search-result page and append one CSV row
    per exhibition.

    Writes through the module-level ``writer`` opened in ``__main__``.
    Returns the raw list of matched ``li.fl-clr`` tags.
    """
    base_url = 'https://fair.china.cn'
    page = BeautifulSoup(html_content, 'html.parser')

    items = page.find_all('li', attrs={'class': 'fl-clr'})
    for entry in items:
        title_tags = entry.select("div[class='ex-titlename fl-clr'] a[class='title-ex']")
        detail_tags = entry.select("div[class='fl-left handle-hd'] p[class='detial-bd']")
        # detail_tags[0] packs everything into a single '|'-separated blob,
        # e.g. "举办日期：2023.11.01-03 | 城市：广州 | 举办场馆：… | 行业：…";
        # it is split apart later in the pandas post-processing step.
        print('中国供应商网', title_tags[0]['title'])
        # Column layout matches the CSV header written in __main__.
        writer.writerow(['', '', title_tags[0]['title'], '', '', detail_tags[0].string,
                         '', '', '', '', '',
                         base_url + title_tags[0]['href'], '', ''])

    return items

# 04 maigoo车展网站
# Site 04: maigoo.com article page with a <table> of car exhibitions; each
# row's detail page is fetched for the canonical name and official website.
def parse_html_04(html_content):
    """Parse the maigoo exhibition table and append one CSV row per fair.

    Each row links to a detail page that carries the canonical fair name
    and (sometimes) the official website; when no official site is listed,
    the maigoo detail-page URL is kept instead.  Writes through the
    module-level ``writer`` opened in ``__main__``.  Returns the raw list
    of matched table rows.
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    carList = soup.find_all('tr', attrs={'class': 'li font14'})
    # (A stray debug print of carList[0] was removed — it crashed with an
    # IndexError whenever the table was empty.)
    for item_a in carList:
        the_td = item_a.select("td")

        the_name = the_td[1].string
        the_date = the_td[2].string
        the_local = the_td[3].string
        the_url = the_td[1].select("a")[0]['href']

        # BUG FIX: these used to be assigned only inside the detail-page
        # branch, so a failed fetch raised NameError on the first row or
        # silently reused the previous row's name/URL.  Fall back to the
        # level-1 name and no official URL.
        the_new_name = the_name
        the_new_url = None

        # Crawl the second-level (detail) page.
        html_content_inside = get_html_content(the_url)
        if html_content_inside:
            soup_inside = BeautifulSoup(html_content_inside, 'html.parser')

            the_info_all = soup_inside.find_all('div', attrs={'class': 'infobox'})
            the_new_name = the_info_all[0].select("div[class='name font24 dhidden b']")[0].string
            the_li_list = the_info_all[0].select("ul[class='font16'] li")
            # The last <li> holds the official-website link when present.
            link = the_li_list[-1].select("a")
            if link:
                the_new_url = link[0]['href']

        # Some fairs publish no official site — keep the level-1 URL then.
        if the_new_url:
            writer.writerow(['', '', the_new_name, '', '', the_date, the_local, '', '', '', '', the_new_url, '', ''])
        else:
            writer.writerow(['', '', the_new_name, '', '', the_date, the_local, '', '', '', '', the_url, '', ''])

    return carList



#控制流程，调用上述函数完成数据抓取任务,main函数
# Driver: crawl 24 listing URLs (3 sites x 8 keyword searches) plus the
# maigoo article page, writing everything into one timestamped CSV.
if __name__ == '__main__':
# The 8 keyword searches per site cover: car / smart / sensor / light /
# electric / internet / connected(-vehicle) / lamp (URL-encoded Chinese).
    url_list=[[
        'https://www.onezh.com/zhanhui/1_0_0_0_0/0/%E8%BD%A6',
        'https://www.onezh.com/zhanhui/1_0_0_0_0/0/%E6%99%BA%E8%83%BD',
        'https://www.onezh.com/zhanhui/1_0_0_0_0/0/%E4%BC%A0%E6%84%9F',
        'https://www.onezh.com/zhanhui/1_0_0_0_0/0/%E5%85%89',
        'https://www.onezh.com/zhanhui/1_0_0_0_0/0/%E7%94%B5',
        'https://www.onezh.com/zhanhui/1_0_0_0_0/0/%E4%BA%92%E8%81%94',
        'https://www.onezh.com/zhanhui/1_0_0_0_0/0/%E7%BD%91%E8%81%94',
        'https://www.onezh.com/zhanhui/1_0_0_0_0/0/%E7%81%AF'
    ],[
        'https://www.shifair.com/exhibition/1-307-0-0-0-0-add_time-desc-1/',
        'https://www.shifair.com/exhibition/1-307-0-0-0-0-add_time-desc-2/',
        'https://www.shifair.com/exhibition/1-32-0-0-0-0-add_time-desc/',
        'https://www.shifair.com/exhibition/1-3-0-0-0-0-add_time-desc/',
        'https://www.shifair.com/exhibition/1-309-0-0-0-0-add_time-desc/',
        'https://www.shifair.com/exhibition/1-30-0-0-0-0-add_time-desc/',
        'https://www.shifair.com/exhibition/1-31-0-0-0-0-add_time-desc/',
        'https://www.shifair.com/exhibition/1-28-0-0-0-0-add_time-desc/'
    ],[
        'https://fair.china.cn/search/%B3%B5.html',
        'https://fair.china.cn/search/%D6%C7%C4%DC.html',
        'https://fair.china.cn/search/%B4%AB%B8%D0.html',
        'https://fair.china.cn/search/%B9%E2.html',
        'https://fair.china.cn/search/%B5%E7.html',
        'https://fair.china.cn/search/%BB%A5%C1%AA.html',
        'https://fair.china.cn/search/%CD%F8%C1%AA.html',
        'https://fair.china.cn/search/%B5%C6.html'
    ]]
# 001 Create the timestamped output CSV and write the header row.
#     ``writer`` is a module-level name used directly by the parse_html_*
#     functions above.
    filename = datetime.now().strftime('车展信息汇总-%Y-%m-%d-%H%M')
    file = open(filename+'.csv','w',newline='',encoding='utf-8')
    writer = csv.writer(file)
    writer.writerow(
        ['序号', '月份', '展会', '类别', '举办地', '举办时间', '举办展馆', '展品范围', '主办机构', '机构性质', '重要度',
         '展会网址', '说明', '统计'])

# 002 Crawl all 24 listing pages.  Each site group is paired with its own
#     parser — this replaces the original manual i/j while-loop counters
#     and the triplicated if(i==0)/if(i==1)/if(i==2) dispatch.
    parsers = [parse_html_01, parse_html_02, parse_html_03]
    for parse_func, group in zip(parsers, url_list):
        for target_url in group:
            print('进入网址：', target_url)
            html_content = get_html_content(target_url)
            if html_content:
                parse_func(html_content)
            else:
                print("网页访问失败")

# The maigoo site is a single article page with its own table parser.
    target_url_04 = 'https://www.maigoo.com/news/665605.html'

    html_content_04 = get_html_content(target_url_04)
    if html_content_04:
        result_list = parse_html_04(html_content_04)
    else:
        print("网页访问失败")

# 003 Flush and close the CSV before pandas re-reads it below.
    file.close()



#004 Post-process the crawled CSV: split the per-site "举办时间" formats
#    apart, parse start/end dates, drop expired and duplicate rows, and
#    write the final CSV.  Everything stays dtype=str until explicitly
#    parsed so the regex filters below behave uniformly.
    print("start data_process")
    filestr = './'+filename+'.csv'
    DataDF = pd.read_csv(filestr,encoding = "utf-8",dtype = str)
    # DataDF = pd.read_csv('./车展信息汇总-2023-10-11-1310.csv',encoding = "utf-8",dtype = str)

    pd.set_option('display.max_colwidth',200)

    print("web1")
    # web1 (onezh) rows: '举办时间' holds one space-separated line like
    # "地区：…  展会时间：2023年10月11日---15日  展馆：…".
    # NOTE(review): web1DF/web2DF/web3DF are slices of DataDF — the
    # in-place column assignments below may trigger SettingWithCopyWarning.
    web1DF = DataDF[DataDF['举办时间'].str.contains(r"\d{4}年\d{1,2}月\d{1,2}日")]

    # Remove layout junk so the field splits cleanly on single spaces.
    web1DF['举办时间'] = web1DF['举办时间'].str.replace(u'\xa0\xa0',u' ')
    web1DF['举办时间'] = web1DF['举办时间'].str.replace('\t','')
    web1DF['举办时间'] = web1DF['举办时间'].str.replace('\r\n','')
    web1DF[['地区','时间','展馆','其他']]=web1DF['举办时间'].str.split(' ',expand=True)

    # NOTE(review): str.lstrip strips a *character set*, not a literal
    # prefix — it can also eat leading characters of the value itself if
    # they appear in the set; confirm against real data (removeprefix is
    # the safe alternative).
    web1DF['地区'] = web1DF['地区'].str.lstrip('地区：')
    web1DF['展馆'] = web1DF['展馆'].str.lstrip('展馆：')
    web1DF['时间'] = web1DF['时间'].str.lstrip('展会时间：')

    # Copy the cleaned pieces back into the canonical CSV columns.
    web1DF['举办地'] = web1DF['地区']
    web1DF['举办展馆'] = web1DF['展馆']
    web1DF['举办时间'] = web1DF['时间']

    web1DF = web1DF.drop(['时间','地区','展馆','其他'],axis=1)

    # Split the "YYYY年M月D日---M月D日" range into start/end halves.
    web1time = web1DF['举办时间']

    web1time1 = web1DF['举办时间'].map(lambda x:x.split('---')[0])
    web1time2 = web1DF['举办时间'].map(lambda x:x.split('---')[1])

    # Parse the start date; the end date borrows the start date's year.
    web1time1 = web1time1.apply(lambda x: datetime.strptime(x,'%Y年%m月%d日'))
    web1tmp = web1time1.astype(str).str.rsplit('-',expand = True)[0]
    web1time2 = web1tmp+'-'+web1time2
    web1time2 = web1time2.apply(lambda x: datetime.strptime(x,'%Y-%m月%d日'))


    # Attach the parsed dates as time1/time2 columns.
    web1time1 = web1time1.rename('time1')
    web1time2 = web1time2.rename('time2')
    time1DF = web1time1.to_frame().join(web1time2)
    web1DF = web1DF.join(time1DF)

    print("web2")
    # web2 (shifair) rows: '举办时间' is "YYYY.MM.DD~MM.DD" and '举办地'
    # is "region-city+venue"; the first two characters after '-' are
    # taken as the city, the rest as the venue.
    web2DF = DataDF[DataDF['举办时间'].str.contains('~')]
    web2DF[['地区','展馆']] = web2DF['举办地'].str.split('-',expand = True)
    web2DF['举办地'] = web2DF['展馆'].str[:2]
    web2DF['举办展馆'] = web2DF['展馆'].str[2:]
    web2DF = web2DF.drop(['地区','展馆'],axis=1)

    # Split the '~' range; the end date borrows the start date's year.
    web2time = web2DF['举办时间']
    web2time1 = web2DF['举办时间'].map(lambda x:x.split('~')[0])
    web2time2 = web2DF['举办时间'].map(lambda x:x.split('~')[1])
    web2tmp = web2time1.str.rsplit('.',expand = True)[0]
    web2time2 = web2tmp+'-'+web2time2

    # Parse both ends to datetime.
    web2time1 = web2time1.apply(lambda x: datetime.strptime(x,'%Y.%m.%d'))
    web2time2 = web2time2.apply(lambda x: datetime.strptime(x,'%Y-%m.%d'))

    # Attach the parsed dates as time1/time2 columns.
    web2time1 = web2time1.rename('time1')
    web2time2 = web2time2.rename('time2')
    time2DF = web2time1.to_frame().join(web2time2)

    web2DF = web2DF.join(time2DF)

    print("web3")
    # web3 (fair.china.cn) rows: '举办时间' is one '|'-separated blob:
    # "举办日期：2023.11.01-03 | 城市：… | 举办场馆：… | 行业：… | …".
    web3DF = DataDF[DataDF['举办时间'].str.contains("举办日期")]
    web3DF[['时间','城市','举办场所','行业','举办周期','举办次数','展出面积','上届展商数','上届观众数']]=web3DF['举办时间'].str.split('|',expand=True)

    # NOTE(review): same lstrip character-set caveat as for web1 above.
    web3DF['行业'] = web3DF['行业'].str.lstrip(' 行业：')
    web3DF['城市'] = web3DF['城市'].str.lstrip(' 城市：')
    web3DF['举办场所'] = web3DF['举办场所'].str.lstrip(' 举办场馆：')
    web3DF['时间'] = web3DF['时间'].str.lstrip(' 举办日期：')

    # Copy the cleaned pieces back into the canonical CSV columns.
    web3DF['举办地'] = web3DF['城市']
    web3DF['举办展馆'] = web3DF['举办场所']
    web3DF['类别'] = web3DF['行业']
    web3DF['举办时间'] = web3DF['时间']

    web3DF = web3DF.drop(['时间','城市','举办场所','行业','举办周期','举办次数','展出面积','上届展商数','上届观众数'],axis=1)


    # Split "YYYY.MM.DD-DD" (or "-M.D") on the dash.
    web3time1 = web3DF['举办时间'].map(lambda x:x.split('-')[0])
    web3time2 = web3DF['举办时间'].map(lambda x:x.split('-')[1])

    # Start dates with a two-digit year (e.g. "23.11.01") get "20" prefixed.
    index = web3time1.index[~web3time1.str.contains(r"\d{4}\.\d{1,2}\.\d{1,2}")]
    web3time1[index] = "20"+web3time1[index].astype('str')

    # Cross-month ranges like "5.29-6.2": keep only the day after the dot.
    # NOTE(review): this discards the end date's own month and reuses the
    # start month below; the iterrows() fix-up near the end is meant to
    # compensate — but see the note there.
    index = web3time2.index[web3time2.str.contains('\.')]
    web3time2[index] = web3time2[index].str.rsplit('.',expand = True)[1]
    web3time2 = web3time2.str.strip(' ')
    web3tmp1 = web3time1.str.rsplit('.',expand = True)[0]
    web3tmp2 = web3time1.str.rsplit('.',expand = True)[1]
    # print("('.',1,expand = True)[0]")
    # print(web3tmp1)
    # print("('.',expand = True)[1]")
    # print(web3tmp2)

    # Rebuild the end date as start-year.start-month.end-day.
    web3time2 = web3tmp1+'.'+web3tmp2+'.'+web3time2


    # Parse both ends and attach as time1/time2.
    web3time1 = web3time1.apply(lambda x: datetime.strptime(x,'%Y.%m.%d'))
    web3time2 = web3time2.apply(lambda x: datetime.strptime(x,'%Y.%m.%d'))
    web3time1 = web3time1.rename('time1')
    web3time2 = web3time2.rename('time2')

    test3DF = web3time1.to_frame().join(web3time2)
    web3DF = web3DF.join(test3DF)

    # Stack the three per-site frames into one.
    outDF = pd.concat([web1DF,web2DF,web3DF])

    # Intended fix-up: when start > end, the end date missed a month
    # rollover, so push it forward one month.
    # NOTE(review): iterrows() yields *copies* — assigning into ``data``
    # never writes back into outDF, so this correction appears to be a
    # no-op; positional data[14]/data[15] access on a labelled Series is
    # also deprecated.  TODO: rewrite with outDF.loc and column names.
    for idx,data in outDF.iterrows():
      if data[14]>data[15]:
        data[15] = data[15]+relativedelta(months = 1)

    # Human-readable "YYYY-MM-DD——YYYY-MM-DD" range column.
    outDF['举办时间'] = outDF['time1'].dt.strftime("%Y-%m-%d")+'——'+outDF['time2'].dt.strftime("%Y-%m-%d")

    # Month column derived from the start date, e.g. "10月".
    outDF['月份'] = outDF['time1'].dt.month
    outDF['月份'] = outDF['月份'].astype(str)+"月"


    # Drop fairs whose start date is already in the past.
    outDF = outDF.set_index('time1')

    outDF.sort_index(inplace = True)
    outDF = outDF.truncate(before = datetime.now())

    outDF.reset_index(inplace = True)

    # Drop exact duplicate rows.
    outDF.drop_duplicates(inplace=True)

    # Restore the column order used in the CSV header, plus time1/time2.
    order = ['序号','月份','展会','类别','举办地','举办时间','举办展馆','展品范围','主办机构','机构性质','重要度','展会网址','说明','统计','time1','time2']
    outDF = outDF[order]

    outDF['time1'] = outDF['time1'].apply(lambda x:x.strftime('%Y-%m-%d'))

    outDF['time2'] = outDF['time2'].apply(lambda x:x.strftime('%Y-%m-%d'))

    #outDF.to_excel('./new1'+filename+'.xlsx',encoding = "utf-8",index = False)
    # outDF.to_excel('./new1'+filename+'.xlsx',index = False)

    outDF.to_csv('./final'+filename+'.csv',encoding = "utf-8",index = False)
    # outDF.to_csv('./new1.csv',encoding = "utf-8",index = False)



