import requests

from bs4 import BeautifulSoup
import  csv

# Step 1: fetch the page and return its decoded HTML text
def get_html_content(url, timeout=10):
    """Fetch *url* and return the response body as text, or None on failure.

    Args:
        url: Page URL to download.
        timeout: Seconds to wait for connect/read before aborting. The
            original call had no timeout and could hang indefinitely on a
            stalled server.

    Returns:
        The decoded HTML string, or None if the request failed.
    """
    # Browser-like User-Agent so the site does not reject the scraper.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
    try:
        response = requests.get(url, headers=headers, timeout=timeout)
        response.raise_for_status()
        # Use the detected encoding so Chinese pages decode correctly.
        response.encoding = response.apparent_encoding
        return response.text
    except requests.RequestException as e:
        # Narrowed from bare `Exception`: only network/HTTP errors are
        # expected here; anything else should surface as a real bug.
        print(f"网络请求异常：{e}")
        return None

# Step 2: parse the page and extract exhibition data into a CSV file
def parse_html(html_content, output_path='output20230829.csv'):
    """Parse the listing page and write one CSV row per exhibition.

    Args:
        html_content: HTML text of the exhibition listing page.
        output_path: CSV file to create (default keeps the original
            hard-coded filename for backward compatibility).

    Returns:
        The list of matched ``<a class="mlcCar">`` Tag elements.
    """
    # 'utf-8-sig' lets Excel on Windows open the Chinese headers correctly;
    # the original relied on the locale default encoding, which can fail
    # with a UnicodeEncodeError on non-UTF-8 systems.
    with open(output_path, 'w', newline='', encoding='utf-8-sig') as file:
        writer = csv.writer(file)
        writer.writerow(['展会的名字', '展会的图片地址', '展会的日期', '展会的地点'])
        soup = BeautifulSoup(html_content, 'html.parser')
        car_list = soup.find_all('a', attrs={'class': 'mlcCar'})

        for item_a in car_list:
            # select() returns a (possibly empty) list of matches.
            the_img = item_a.select("div[class='mlcclPic'] img")
            the_name = item_a.select("div[class='mlccRight'] h2[class='mlccrTitle']")
            the_date = item_a.select("div[class='mlccRight'] p[class='mlccrTime']")
            the_local = item_a.select("div[class='mlccRight'] span[class='mlccrpPrice']")

            # Skip malformed items instead of raising IndexError (the
            # original indexed [0] unconditionally).
            if not (the_img and the_name and the_date and the_local):
                continue
            local_text = the_local[0].string
            if local_text is None:
                # .string is None for tags with nested children; .strip()
                # on it would raise AttributeError in the original.
                continue

            print("展会的名字：", the_name[0].string)
            print("展会的图片地址：", the_img[0]['src'])
            print("展会的日期：", the_date[0].string)
            print("展会的开展地址：", local_text.strip())
            writer.writerow([the_name[0].string, the_img[0]['src'],
                             the_date[0].string, local_text.strip()])

    return car_list

# Step 3: persist the results to local or remote storage (placeholder)
def store_data(result_list):
    """No-op persistence hook; always returns None.

    Args:
        result_list: Parsed exhibition entries produced by parse_html.
    """
    # TODO: implement saving result_list to disk or an external service.
    return None

# Step 4: orchestrate the pipeline: fetch -> parse -> store
if __name__ == '__main__':
    # Listing page of auto ("汽车") exhibitions on m.onezh.com.
    target_url = "https://m.onezh.com/zhanhui/1_0_0_0_0/0/%E6%B1%BD%E8%BD%A6"
    html_content = get_html_content(target_url)
    if not html_content:
        print("网页访问失败")
    else:
        store_data(parse_html(html_content))