import scrapy
from ..items import WeatherItem
# Mapping of Hebei-province city name -> weather.com.cn station code.
# Each code is interpolated into a forecast URL of the form
# http://www.weather.com.cn/weather/<code>.shtml (see start_urls below).
citycode={
'石家庄': '101090101',
'保定': '101090201',
'张家口': '101090301',
'承德': '101090402',
'唐山': '101090501',
'廊坊': '101090601',
'沧州': '101090701',
'衡水': '101090801',
'邢台': '101090901',
'邯郸': '101091001',
'秦皇岛': '101091101',
}
class WeaspiderSpider(scrapy.Spider):
    """Scrape the 7-day forecast pages of weather.com.cn for Hebei cities.

    One start URL is generated per city code in ``citycode``; ``parse``
    turns each page into a list of ``WeatherItem`` objects.
    """

    name = 'weaSpider'
    allowed_domains = ['www.weather.com.cn']
    # A comprehension (rather than a class-body for-loop with append)
    # avoids leaking the loop variable as a class attribute.
    start_urls = [
        'http://www.weather.com.cn/weather/' + code + '.shtml'
        for code in citycode.values()
    ]

    def parse(self, response):
        """Parse one city's 7-day forecast page.

        Returns a list of WeatherItem with fields:
        city  -- city name taken from the page breadcrumb
        date  -- day label, e.g. "18日（今天）"
        wea   -- weather description, e.g. "多云"
        tem   -- "high/low" temperature string
        """
        days = response.xpath("//*[@id='7d']/ul/li")
        # Third breadcrumb anchor holds the city name; extract_first avoids
        # an IndexError if the breadcrumb is missing or the page layout shifts.
        city = response.xpath(
            "//*[@class='crumbs fl']/a[3]/text()").extract_first(default='')
        items = []
        for day in days:
            item = WeatherItem()
            item["city"] = city
            item["date"] = day.xpath('h1/text()').extract_first(default='')
            item["wea"] = day.xpath('p[1]/text()').extract_first(default='')
            # Today's entry sometimes omits the high-temperature <span>
            # (night-time pages), so extract()[0] used to raise IndexError
            # and abort the whole page; default to '' instead.
            high = day.xpath('p[2]/span/text()').extract_first(default='')
            low = day.xpath('p[2]/i/text()').extract_first(default='')
            item["tem"] = high + "/" + low
            items.append(item)
        return items



