import pymysql
import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import random

def insert_data(conn, cursor, data):
    """Persist scraped weather rows into the `weather_info` table.

    Uses a parameterized bulk insert and commits once all rows are staged.

    :param conn: open database connection (used only to commit)
    :param cursor: database cursor obtained from ``conn``
    :param data: pandas DataFrame whose columns are, in order:
        date, max_temp, min_temp, weather_type, wind_direction
    """
    insert_sql = (
        "INSERT INTO weather_info (date, max_temp, min_temp, weather_type, wind_direction) "
        "VALUES (%s, %s, %s, %s, %s)"
    )
    rows = data.values.tolist()
    cursor.executemany(insert_sql, rows)
    conn.commit()

def spider_weather(date, current_timestamp, conn):
    """Scrape one month of historical Beijing weather and store it in the DB.

    Fetches https://lishi.tianqi.com/beijing/<date>.html, extracts every
    daily record (date, max temp, min temp, weather type, wind direction)
    from the "thrui" container, prints each record, and bulk-inserts all
    of them in a single transaction via :func:`insert_data`.

    :param date: month to scrape, formatted "YYYYMM" (e.g. "202201")
    :param current_timestamp: unix timestamp placed in the session cookie
    :param conn: open pymysql connection
    :raises requests.HTTPError: if the site responds with a non-2xx status
    """
    url = f"https://lishi.tianqi.com/beijing/{date}.html"

    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36",
        "Referer": "https://lishi.tianqi.com/",
        "Accept-Language": "en-US,en;q=0.9"
    }
    cookies = {
        "UserId": "17102285395096421",
        "Hm_lvt_7c50c7060f1f743bccf8c150a646e90a": "1710228540",
        "Hm_lvt_30606b57e40fddacb2c26d2b789efbcb": "1710228553",
        "Hm_lpvt_30606b57e40fddacb2c26d2b789efbcb": "1710228553",
        "Hm_lpvt_7c50c7060f1f743bccf8c150a646e90a": str(current_timestamp)
    }

    response = requests.get(url, headers=headers, cookies=cookies)
    # Fail fast on HTTP errors instead of silently parsing an error page.
    response.raise_for_status()

    soup = BeautifulSoup(response.text, "html.parser")

    # Collect every valid daily record first, then insert them in one batch
    # (one cursor, one executemany, one commit) instead of per row.
    rows = []
    for container in soup.find_all(class_="thrui"):
        for li_tag in container.find_all("li"):
            div_contents = [div.text.strip() for div in li_tag.find_all("div")]

            # A valid daily record has exactly 5 fields; skip anything else.
            if len(div_contents) != 5:
                continue

            # NOTE: use a distinct name (`day`) so we don't clobber the
            # `date` parameter (the month being scraped).
            day, max_temp, min_temp, weather, wind = div_contents
            print("日期:", day)
            print("最高气温:", max_temp)
            print("最低气温:", min_temp)
            print("天气:", weather)
            print("风向:", wind)
            print()
            rows.append(div_contents)

    if rows:
        cursor = conn.cursor()
        try:
            insert_data(conn, cursor, pd.DataFrame(
                rows,
                columns=['date', 'max_temp', 'min_temp',
                         'weather_type', 'wind_direction']
            ))
        finally:
            # Always release the cursor, even if the insert fails.
            cursor.close()


# --- Script entry point: scrape Beijing weather month by month and persist it ---

from datetime import datetime
from dateutil.relativedelta import relativedelta

# Connect to the database that holds the weather_info table.
conn = pymysql.connect(host='localhost', port=3306, user='root',
                       password='123456', db='food_price')

start_date = datetime(2022, 1, 3)    # first month to scrape (2022-01)
end_date = datetime(2023, 3, 12)     # last month to scrape (2023-03)
current_timestamp = int(time.time())  # timestamp reused in the session cookie
current_date = start_date

try:
    while current_date <= end_date:
        # Format as "YYYYMM", e.g. "202303" — the site keys its pages by month.
        date_str = current_date.strftime("%Y%m")
        print(f"---------------正在爬取{date_str}的天气情况---------------")
        spider_weather(date=date_str, current_timestamp=current_timestamp, conn=conn)
        print(f"---------------爬取{date_str}天气完成---------------")
        # Random polite pause between requests to avoid hammering the site.
        time.sleep(random.uniform(30, 60))
        current_date += relativedelta(months=1)  # advance one month
finally:
    # Always release the DB connection, even if a request/insert fails mid-run.
    conn.close()
