import scrapy
from scrapy.utils.project import get_project_settings
from urllib.parse import urljoin

from sdWeatherSpider.items import SdweatherspiderItem


class EverycityinSDSpider(scrapy.Spider):
    """Spider for weather.com.cn.

    Starts from one index page per configured province, follows every city
    forecast link found there, and yields one item per city containing the
    city name, the numeric city code, and the multi-day forecast text.
    """

    name = 'everyCityinSD'
    allowed_domains = ['www.weather.com.cn']

    # Province slugs come from the project settings key PROVINCES.
    # NOTE: `import scrapy` does not guarantee the `scrapy.utils.project`
    # submodule is loaded, so chained attribute access
    # (`scrapy.utils.project.get_project_settings`) can raise AttributeError
    # at import time; the explicit top-level import is required.
    provinces = get_project_settings().get('PROVINCES', [])
    start_urls = [f'http://www.weather.com.cn/{province}/index.shtml'
                  for province in provinces]

    # Anchors on a province index page that point at a city forecast page.
    CITY_LINK_XPATH = '//a[contains(@href, "/weather/") and contains(@title, "天气预报")]'

    def parse(self, response):
        """Parse a province index page and request each city weather page."""
        # e.g. http://www.weather.com.cn/<province>/index.shtml -> <province>
        province = response.url.split('/')[-2]

        # Iterate over the anchor elements themselves instead of zipping two
        # independently-extracted lists: an anchor whose text node is missing
        # would shorten the names list and silently mis-pair every subsequent
        # link with the wrong city name.
        for anchor in response.xpath(self.CITY_LINK_XPATH):
            link = anchor.xpath('./@href').get()
            name = anchor.xpath('./text()').get()
            if not link or not name:
                continue  # skip anchors lacking a usable href/name pair

            city_url = urljoin(response.url, link)
            if '/weather/' in city_url and city_url.endswith('.shtml'):
                yield scrapy.Request(
                    url=city_url,
                    callback=self.parse_city_weather,
                    # Forward the city name to parse_city_weather via meta.
                    meta={'province': province, 'city': name},
                    errback=self.errback_city,
                )

    def parse_city_weather(self, response):
        """Extract the city code and per-day forecast from a city page.

        Yields a SdweatherspiderItem with `city`, `city_code` and `weather`
        fields; `weather` is one line per day, `date:desc,temp,wind`.
        """
        item = SdweatherspiderItem()

        # City name was forwarded from parse() via request meta.
        item['city'] = response.meta.get('city', '未知城市')

        # City code is the file stem of the page URL,
        # e.g. http://www.weather.com.cn/weather/101010400.shtml -> 101010400
        item['city_code'] = response.url.split('/')[-1].split('.')[0]

        try:
            weather_container = response.xpath('//ul[@class="t clearfix"]')
            if not weather_container:
                raise ValueError("未找到天气列表容器")

            weather_list = []
            for day in weather_container.xpath('./li'):
                date = day.xpath('./h1/text()').get(default='未知日期').strip()
                weather_desc = day.xpath('./p[@title]/text()').get(default='未知天气').strip()
                temp_high = day.xpath('./p[@class="tem"]/span/text()').get(default='')
                temp_low = day.xpath('./p[@class="tem"]/i/text()').get(default='')
                # When the high is absent (presumably pages scraped in the
                # evening list only a low — verify on live data), fall back
                # to the low-only form.
                temp = f'{temp_high}/{temp_low}' if temp_high else temp_low
                wind = (day.xpath('./p[@class="win"]/em/@title').get(default='') +
                        day.xpath('./p[@class="win"]/i/text()').get(default='')).strip()
                wind = wind if wind else '未知风力'
                weather_list.append(f'{date}:{weather_desc},{temp},{wind}')

            item['weather'] = '\n'.join(weather_list)

        except Exception as e:
            # Keep the crawl going on a malformed page; record which city failed.
            self.logger.error(f'城市{item["city"]}天气解析失败: {str(e)}')
            item['weather'] = '数据解析异常'

        yield item

    def errback_city(self, failure):
        """Log a failed city-page request without aborting the crawl."""
        self.logger.error(f'请求失败: {failure.request.url} - {repr(failure)}')
