import requests
from bs4 import BeautifulSoup
import re
import time
import pandas as pd

# Browser-like User-Agent sent with every request so the government site does
# not reject the scripted fetches as a bot.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0'
}


def main():
    """Entry point: scrape the 2023 Beijing tourism statistics and save a CSV.

    Fetches the index page, discovers the per-period detail pages, scrapes
    each one, and writes the combined table to disk.
    """
    index_url = 'https://whlyj.beijing.gov.cn/zwgk/zxgs/tjxx/history/2023/'
    # Fetch the index page and pull out the per-period detail links.
    detail_urls = parse_url_data(req(index_url))
    # Scrape every detail page, then persist everything as a single CSV.
    scraped = parse_data(detail_urls)
    save_data_to_csv(scraped['info'], scraped['columns'])

def req(url, timeout=10):
    """Fetch *url* and return the decoded response body.

    Args:
        url: Absolute URL to request.
        timeout: Seconds to wait for the server before giving up. Without a
            timeout, ``requests`` can block forever on an unresponsive host.

    Returns:
        str: The response body decoded as text.

    Raises:
        requests.HTTPError: On a 4xx/5xx status code.
        requests.Timeout: If the server does not respond within ``timeout``.
    """
    response = requests.get(url, headers=headers, timeout=timeout)
    # Fail loudly on error responses instead of silently parsing an error page.
    response.raise_for_status()
    return response.text

def parse_url_data(html):
    """Extract absolute detail-page URLs from the index page HTML.

    Args:
        html: Raw HTML of the 2023 statistics index page.

    Returns:
        list[str]: Absolute URLs of the per-period detail pages.
    """
    soup = BeautifulSoup(html, 'lxml')
    # Relative links look like ./lyqhd/<dir>/<name>_<id>.html. The dot before
    # "html" must be escaped — unescaped, `.` matches any character.
    anchors = soup.find_all('a', attrs={'href': re.compile(r'\./lyqhd/\w+/\w+_\w+\.html')})
    prefix = "https://whlyj.beijing.gov.cn/zwgk/zxgs/tjxx/history/2023"
    # Drop the leading "." of each relative href and prepend the section root.
    return [prefix + a['href'][1:] for a in anchors]


def info_replace(info):
    """Strip whitespace noise from a scraped table-cell value.

    Removes newlines, tabs, carriage returns, ASCII spaces and the full-width
    (Chinese) space U+3000 in one pass. The original chained ``replace`` calls
    used an ASCII space twice, so the full-width space its comment promised to
    remove was never actually stripped.

    Args:
        info: Raw text pulled out of a table cell.

    Returns:
        str: The text with all of the above characters removed.
    """
    return info.translate(str.maketrans('', '', '\n\t\r \u3000'))


def parse_data(url_list):
    """Scrape every detail page and collect the statistics rows.

    Args:
        url_list: Absolute URLs of the per-period detail pages.

    Returns:
        dict: ``'info'`` — two rows per page (2023 figure and 2022 comparison),
        each starting with a date label; ``'columns'`` — the metric names taken
        from the table (every page is assumed to share the same layout, so the
        last page's names win).
    """
    rows = []
    metric_names = []
    for page_url in url_list:
        page = BeautifulSoup(req(page_url), 'lxml')
        # Table-body rows 1..9: the first carries the reporting period,
        # the rest carry one metric each.
        body_rows = page.find('tbody').find_all('tr')[1:10]
        period_cell = body_rows[0].find_all('td')[2]
        metric_cells = [row.find_all('td') for row in body_rows[1:]]
        # Column label format: "<metric name>(<unit>)".
        metric_names = [
            f'{info_replace(cells[0].text)}({info_replace(cells[1].text)})'
            for cells in metric_cells
        ]
        period = info_replace(period_cell.text)
        # Cell index 2 holds the 2023 figure, index 3 the 2022 comparison.
        rows.append([f'2023年{period}'] + [info_replace(cells[2].text) for cells in metric_cells])
        rows.append([f'2022年{period}'] + [info_replace(cells[3].text) for cells in metric_cells])
        # Be polite to the server between page fetches.
        time.sleep(1)
    return {
        'info': rows,
        'columns': metric_names
    }


def save_data_to_csv(data, columns, path='../static/data/info.csv'):
    """Write the scraped rows to a CSV file.

    Args:
        data: Rows as produced by ``parse_data`` — each begins with a date label.
        columns: Metric column names; the date column is prepended here.
        path: Destination file. Defaults to the project's static data folder,
            so existing callers are unaffected.
    """
    df = pd.DataFrame(data, columns=['日期'] + columns)
    # utf_8_sig writes a BOM so Excel opens the Chinese headers correctly.
    df.to_csv(path, index=False, encoding='utf_8_sig')

# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
