# -*- coding: utf-8 -*-

import scrapy
from zhcw_scraper.items import ZhcwItem

class ZhcwSpider(scrapy.Spider):
    """Spider that scrapes 3D lottery draw results from kaijiang.zhcw.com.

    Flow: ``start_requests`` fetches page 1, ``parse`` reads the total page
    count from the pager row, then every list page is requested and each
    draw row is turned into a ``ZhcwItem`` by ``parse_data``.
    """

    name = 'zhcw'

    def __init__(self, name=None, **kwargs):
        super(ZhcwSpider, self).__init__(name, **kwargs)
        # Total number of result pages; discovered in parse().
        self.page = 1
        # Page index that req_url will render into the URL template.
        self.currentPage = 1
        self.url_formate = 'http://kaijiang.zhcw.com/zhcw/html/3d/list_%(index)s.html'

    @property
    def req_url(self):
        """URL of the result-list page for the current page index."""
        return self.url_formate % {"index": self.currentPage}

    def start_requests(self):
        # Fetch the first page only to discover the total page count.
        yield scrapy.Request(url=self.req_url,
                             dont_filter=True,
                             callback=self.parse)

    @staticmethod
    def _first_text(selector, xpath):
        """Return the stripped text of the first node matched by *xpath*,
        or "" when the cell is empty (older pages omit some cells)."""
        text = selector.xpath(xpath).extract()
        return text[0].strip() if text else ""

    def parse_data(self, response):
        """Extract one ZhcwItem per draw row of a result-list page."""
        self.logger.info("current url is : %s", response.url)
        # The first two rows are table headers and the last row is the
        # pager, so keep only the data rows in between.
        rows = response.xpath('//table/tr[position()>=3]')[0:-1]
        res = []
        for tr in rows:
            # Winning numbers -- a valid draw has exactly three digits.
            # Guard the indexing (the other cells already are guarded) so
            # one malformed row does not raise IndexError and drop the
            # whole page's items.
            _num = tr.xpath('td[3]/em/text()').extract()
            if len(_num) < 3:
                self.logger.warning("skipping malformed row on %s", response.url)
                continue

            zhcw = ZhcwItem()
            # Draw date
            zhcw['date'] = self._first_text(tr, 'td[1]/text()')
            # Issue number
            zhcw['issue'] = self._first_text(tr, 'td[2]/text()')
            zhcw['num'] = "".join(_num)
            zhcw['num1'] = _num[0]
            zhcw['num2'] = _num[1]
            zhcw['num3'] = _num[2]
            # Straight-bet prize
            zhcw['selecter'] = self._first_text(tr, 'td[4]/text()')
            # Group-3 prize
            zhcw['selecter3'] = self._first_text(tr, 'td[5]/text()')
            # Group-6 prize
            zhcw['selecter6'] = self._first_text(tr, 'td[6]/text()')
            # Sales amount
            zhcw['sale_money'] = self._first_text(tr, 'td[7]/strong/text()')
            # Payout ratio, with the trailing '%' stripped
            zhcw['scale'] = self._first_text(tr, 'td[8]/text()').replace('%', '')

            res.append(zhcw)
        return res

    def parse(self, response):
        """Read the total page count from page 1, then request every page."""
        try:
            self.page = int(
                response.xpath('//table/tr[last()]/td/p/strong[1]/text()')
                .extract()[0])
        except (IndexError, ValueError):
            # IndexError: pager element missing (layout changed);
            # ValueError: pager text is not numeric. Either way, abort.
            self.logger.info("Parse page error!")
            return

        self.logger.info("spider all page is %s", self.page)

        for index in range(1, self.page + 1):
            # req_url reads currentPage, so it must be updated before
            # each Request is constructed.
            self.currentPage = index
            yield scrapy.Request(url=self.req_url,
                                 dont_filter=True,
                                 callback=self.parse_data)






