# -*- coding: utf-8 -*-
import scrapy
import sys
# Python 2 hack: `site.py` deletes sys.setdefaultencoding at startup;
# reload(sys) restores it so implicit str<->unicode coercions use UTF-8
# instead of ASCII (the page content and region labels are Chinese).
# NOTE(review): this is widely discouraged — it can mask real encoding
# bugs; confirm it is still required before porting this spider.
reload(sys)
sys.setdefaultencoding('utf-8')
from zggangcai.items import ZggangcaiItem

class GangcaiSpider(scrapy.Spider):
    """Crawl daily construction-steel price tables from zh818.com.

    Flow: region map page (``parse``) -> per-province article listings,
    49 pages each (``parse_1``) -> listing rows filtered to
    construction-steel price reports (``parse_3``) -> price table rows,
    one ``ZggangcaiItem`` per row (``parse_2``).
    """
    name = 'gangcai'
    allowed_domains = ['www.zh818.com']
    start_urls = ['http://res.zh818.com/Wen/map/map-new.htm']

    # Session cookies captured from a logged-in browser; sent with every
    # listing/article request so the site serves member-only content.
    # SECURITY NOTE(review): this embeds account credentials
    # (memname/Password/memid) in source control — move them to Scrapy
    # settings or an environment variable.
    cook_1={'_jzqx':'1.1525420466.1525420466.1.jzqsr=zh818%2Ecom|jzqct=/.-',
	'_jzqa':'1.2731910549753076700.1525420466.1525420466.1525660896.2',
	'_jzqckmp':'1',
	'Hm_lvt_a4fed9b8efe84d560b5eef88e584f4f1':'1525661133',
	'ASP.NET_SessionId':'b5epa5uzlake21k15toqtx3m',
	'Hm_lvt_02d9b64797c66dc373662947ceef6283':'1525420304,1525420763,1525656974,1525742328',
	'.zh818.com':'memname=13611063347&Password=75346134f16f24c0&logout=0&memid=544577',
	'Hm_lpvt_02d9b64797c66dc373662947ceef6283':'1525746184'}

    # Regions on the map page, as
    # (div index under #A-nei, exclusive end of the child-div range,
    #  region label stored on each item).  East China and Northwest have
    # five province slots (range(2, 7)); the others have three.
    _REGIONS = [
        (3, 7, '华东'),  # East China
        (4, 5, '华北'),  # North China
        (5, 5, '华南'),  # South China
        (6, 5, '华中'),  # Central China
        (7, 5, '东北'),  # Northeast
        (8, 5, '西南'),  # Southwest
        (9, 7, '西北'),  # Northwest
    ]

    def parse(self, response):
        """Extract every province link from the region map and request
        its article listing, tagging the request with province and
        region labels via ``meta``."""
        for div_idx, end, area in self._REGIONS:
            for request in self._region_requests(response, div_idx, end, area):
                yield request

    def _region_requests(self, response, div_idx, end, area):
        """Yield one Request per province link inside one region's div.

        :param div_idx: index of the region's <div> under #A-nei.
        :param end: exclusive upper bound of the province <div> indices.
        :param area: region label propagated to every scraped item.
        """
        base = '//*[@id="A-nei"]/div[%d]/div[%d]/div[2]/div[1]/a[2]'
        for i in range(2, end):
            anchor = base % (div_idx, i)
            url = response.xpath(anchor + '/@href').extract_first()
            addr = response.xpath(anchor + '/text()').extract_first()
            if not url:
                # Empty map cell (fewer provinces than the fixed range);
                # skip instead of crashing scrapy.Request on url=None.
                continue
            meta = {'url': url, 'addr': addr, 'area': area}
            yield scrapy.Request(url=url, callback=self.parse_1, meta=meta)

    def parse_1(self, response):
        """Fan out over a province's paginated article listing.

        Pages 1..49 are requested unconditionally; pages past the end
        simply 404 and are dropped by Scrapy.
        """
        meta = response.meta
        # Listing URLs use a capital-G path segment while the map links
        # are lowercase; normalize before building page URLs.
        page_url = meta['url'].replace('g', 'G')
        for page in range(1, 50):
            yield scrapy.Request(
                url='%s/index_%s.html' % (page_url, page),
                callback=self.parse_3,
                meta=meta,
                cookies=self.cook_1,
            )

    def parse_3(self, response):
        """Scan one listing page and follow construction-steel
        price-report articles only."""
        meta = response.meta
        trs = response.xpath('//*[@id="content2010-text"]/table[1]//tr')
        print('----len(trs) ==  %d' % len(trs))
        for tr in trs:
            title = tr.xpath('td[1]/a/text()').extract_first()
            # Header/separator rows have no anchor -> title is None;
            # the original `in` test raised TypeError on those rows.
            if not title or '建筑钢材市场价格行情' not in title:
                continue
            url = response.urljoin(tr.xpath('td[1]/a/@href').extract_first())
            meta['title'] = title
            yield scrapy.Request(url=url, callback=self.parse_2,
                                 meta=meta, cookies=self.cook_1)

    def parse_2(self, response):
        """Parse one price-report article into ZggangcaiItem rows."""
        meta = response.meta
        # Skip the two header rows of the price table.
        trs = response.xpath('//*[@id="zoom"]/table//tr')[2:]
        print('---------------- %d' % len(trs))
        # Publication date: the first three numbers in the copyright
        # line are year, month, day.
        year = response.xpath('//*[@id="content-copyright"]/text()').re(r'\d+')
        time_1 = '%s-%s-%s' % (year[0], year[1], year[2])
        for tr in trs:
            item = ZggangcaiItem()
            item['title'] = meta['title']
            item['addr'] = meta['addr']
            item['area'] = meta['area']
            item['time_1'] = time_1
            # Columns: product name / spec / material / origin / price.
            item['name_1'] = self._clean(tr.xpath('td[1]/text()').extract_first())
            item['formatr_1'] = self._clean(tr.xpath('td[2]/text()').extract_first())
            item['material'] = self._clean(tr.xpath('td[3]/text()').extract_first())
            item['src'] = self._clean(tr.xpath('td[4]/text()').extract_first())
            item['price'] = self._clean(tr.xpath('td[5]/text()').extract_first())
            print('-------------------')
            for value in item.values():
                print(value)
            yield item

    @staticmethod
    def _clean(value):
        """Strip surrounding whitespace from a table cell; return the
        legacy placeholder '1' when the cell is missing (None), which is
        the only case the original bare ``except`` clauses ever hit."""
        try:
            return value.strip()
        except AttributeError:
            return '1'



