'''
Historical-weather scraper for lishi.tianqi.com.

NOTE: this code currently raises errors at runtime.
'''

import requests
from lxml import etree
from lxml import html
import pandas as pd
import time
import json
import os
from datetime import datetime
import logging

# Logging configuration: INFO level, mirrored to both a log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.FileHandler("weather_scraper.log"), logging.StreamHandler()]
)
logger = logging.getLogger("weather_scraper")

# Root directory for scraped output; created up front so saves never fail on a missing dir.
DATA_DIR = "weather_data"
os.makedirs(DATA_DIR, exist_ok=True)


# def get_html(city, month=None):
#     """获取指定城市和月份的天气HTML数据"""
#     if month is None:
#         month = datetime.now().strftime("%Y%m")
#
#     headers = {
#         "Accept-Encoding": "Gzip",
#         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
#     }
#
#     # 这里使用天气网作为示例
#     # 实际URL需要根据目标网站调整
#     url = f"https://lishi.tianqi.com/{city}/{month}.html"
#
#     try:
#         response = requests.get(url, headers=headers, timeout=10)
#
#         if response.status_code == 200:
#             logger.info(f"Successfully fetched data for {city} in {month}")
#             # tree = html.fromstring(response.content)
#             tree = etree.HTML(response.text)
#             print(tree)
#             return tree
#         else:
#             logger.error(f"Failed to fetch data: HTTP {response.status_code}")
#             return None
#     except Exception as e:
#         logger.error(f"Error fetching data for {city} in {month}: {e}")
#         return None
def get_html(city, month=None):
    """Fetch the historical-weather page for *city*/*month* and return the parsed tree.

    Args:
        city: pinyin city slug used in the URL (e.g. "beijing").
        month: "YYYYMM" string; defaults to the current month.

    Returns:
        An lxml element tree on success, or None when the request fails.
    """
    # Bug fix: the old body interpolated month=None straight into the URL,
    # producing ".../None.html". Default to the current month instead.
    if month is None:
        month = datetime.now().strftime("%Y%m")

    headers = {
        "Accept-Encoding": "Gzip",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
    }
    url = f'https://lishi.tianqi.com/{city}/{month}.html'

    try:
        # timeout so a stalled server cannot hang the whole scrape
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code != 200:
            logger.error(f"Failed to fetch data: HTTP {response.status_code}")
            return None
        logger.info(f"Successfully fetched data for {city} in {month}")
        return etree.HTML(response.text)
    except requests.RequestException as e:
        logger.error(f"Error fetching data for {city} in {month}: {e}")
        return None

def parse_weather_data(html_tree, city, month):
    """Extract daily weather records from a fetched history page.

    Args:
        html_tree: lxml tree returned by get_html(), or None.
        city: city slug, used only for log messages.
        month: "YYYYMM" string, used only for log messages.

    Returns:
        A list of dicts with keys date/high_temp/low_temp/weather/
        wind_direction/wind_force, or None on parse failure or None input.
    """
    # Bug fix: the old body ignored html_tree and re-fetched the page via
    # get_html(), defeating the None guard and doubling the request load.
    if html_tree is None:
        return None

    weather_data = []
    try:
        # Daily rows live inside <div class="tian_three">, one <li> per day.
        containers = html_tree.xpath('.//div[@class="tian_three"]')
        if containers:
            for li in containers[0].xpath('.//li'):
                # Query each <li> once per column class instead of five
                # separate xpath calls per field.
                date_cells = li.xpath('./div[@class="th200"]/text()')
                value_cells = li.xpath('./div[@class="th140"]/text()')
                if not date_cells or len(value_cells) < 5:
                    # Skip malformed rows instead of raising IndexError.
                    continue
                weather_data.append({
                    'date': date_cells[0].strip(),
                    'high_temp': value_cells[0].strip(),
                    'low_temp': value_cells[1].strip(),
                    'weather': value_cells[2].strip(),
                    'wind_direction': value_cells[3].strip(),
                    'wind_force': value_cells[4].strip(),
                })

        # Bug fix: month[0] logged only the first character of the month.
        logger.info(f"Parsed {len(weather_data)} records for {city} in {month}")
        return weather_data
    except Exception as e:
        logger.error(f"Error parsing data for {city} in {month}: {e}")
        return None


def save_weather_data(data, city, month):
    """Persist one month of parsed weather records to DATA_DIR/<city>/<month>.json.

    Args:
        data: list of record dicts from parse_weather_data(), or None.
        city: city slug; becomes a subdirectory of DATA_DIR.
        month: "YYYYMM" string; becomes the JSON filename.

    Returns:
        True when the file was written, False otherwise.
    """
    if not data:
        logger.warning(f"No data to save for {city} in {month}")
        return False

    # Ensure the per-city directory exists.
    city_dir = os.path.join(DATA_DIR, city)
    os.makedirs(city_dir, exist_ok=True)

    filename = os.path.join(city_dir, f"{month}.json")

    try:
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)
        # Bug fix: the old message was the literal string "Saved data to (unknown)".
        logger.info(f"Saved data to {filename}")
        return True
    except OSError as e:
        logger.error(f"Error saving data: {e}")
        return False


def scrape_city_weather(city, months=None):
    """Scrape one city's weather for each requested month.

    Args:
        city: city slug used in the source URL.
        months: iterable of "YYYYMM" values (strings, ints, or a pandas
            Index); defaults to the current month.

    Returns:
        Dict mapping each month string to the number of records saved.
    """
    if months is None:
        # Default to the current month only.
        months = [datetime.now().strftime("%Y%m")]

    # Normalize any iterable (list, pandas Index, ...) to a list of strings.
    months = [str(m) for m in months]

    summary = {}
    for month in months:
        logger.info(f"Scraping {city} weather data for {month}")

        tree = get_html(city, month)
        records = parse_weather_data(tree, city, month)

        if records:
            save_weather_data(records, city, month)
            summary[month] = len(records)
        else:
            summary[month] = 0

        # Throttle between months to avoid hammering the site.
        time.sleep(2)

    return summary


def bulk_scrape(cities, months=None):
    """Scrape several cities, pausing between them to avoid IP bans.

    Args:
        cities: iterable of city slugs.
        months: optional month list forwarded to scrape_city_weather().

    Returns:
        Dict mapping each city to its per-month record-count summary.
    """
    all_results = {}
    for name in cities:
        all_results[name] = scrape_city_weather(name, months)
        # Longer gap between cities than between months.
        time.sleep(5)
    return all_results


if __name__ == "__main__":
    # Smoke-test run against a single city.
    cities = ["beijing"]
    # period_range(...).strftime() yields a pandas Index of "YYYYMM" strings.
    current_month = pd.period_range('202502', '202502', freq='M').strftime('%Y%m')

    print(f"Starting weather data scraping for {len(cities)} cities...")
    # Bug fix: previously the Index was wrapped as [current_month], so the
    # downstream str(m) turned the whole Index into one bogus "month" and the
    # built URL was garbage. Convert the Index to a plain list of strings.
    results = bulk_scrape(cities, list(current_month))

    print("\nScraping Results:")
    for city, data in results.items():
        for month, count in data.items():
            print(f"  {city} ({month}): {count} records")