from commonresources.spiders.basespider import BaseSpider
from scrapy.selector import Selector
from scrapy.http import Request
import re

class YanChengShiZhengFuCaiGouWangSpider(BaseSpider):
    '''
        Spider for the Yancheng Municipal Government Procurement site
        (盐城市政府采购网), http://czj.yancheng.gov.cn/col/col2383/index.html

        Crawls two announcement listing columns, follows pagination until
        `check_if_need_break` (defined on BaseSpider) signals that already-seen
        announcements were reached, and yields a Request per announcement
        detail page for `parse_info` (defined on BaseSpider) to process.
        '''
    name = 'YanChengShiZhengFuCaiGouWang'
    name_zh = '盐城市政府采购网'
    province = "江苏"
    city = '盐城'
    allowed_domains = ['czj.yancheng.gov.cn']
    start_urls = ['http://czj.yancheng.gov.cn/col/col20138/index.html',
                  'http://czj.yancheng.gov.cn/col/col20174/index.html']

    # Paging metadata is embedded in the listing page's inline JavaScript as
    # "perPage:<n>" and "totalRecord:<n>".  Raw strings avoid the invalid
    # escape the old patterns had ('perPage\:'), and \d+ (not \d*) prevents a
    # degenerate empty match that would make int() raise ValueError.
    _PER_PAGE_RE = re.compile(r'perPage:(\d+)')
    _TOTAL_RECORD_RE = re.compile(r'totalRecord:(\d+)')

    def __init__(self, full_dose=False):
        # full_dose: when truthy, BaseSpider crawls the full history instead
        # of stopping at previously seen announcements (inferred from the
        # check_if_need_break usage below -- confirm against BaseSpider).
        super(YanChengShiZhengFuCaiGouWangSpider, self).__init__(full_dose)
        self.current_domain = "http://czj.yancheng.gov.cn/"

    def parse(self, response):
        """Extract the per-category listing links from a start page and
        schedule each one for parse_list.

        Each <a> carries the announcement type in @title and a site-relative
        URL in @href.
        """
        sel = Selector(response)

        link_list = sel.xpath('//td[@align="right"]//td/a')
        for a_link in link_list:
            announcement_type = a_link.xpath("./@title").extract_first()
            href = a_link.xpath("./@href").extract_first()
            if not href:
                # Malformed anchor without a target -- skip instead of
                # raising IndexError like the old extract()[0] did.
                continue
            url = self.current_domain + href
            # Use the spider logger instead of print() so output respects
            # Scrapy's log configuration.
            self.logger.debug("%s      %s", announcement_type, url)
            yield Request(url=url, callback=self.parse_list,
                          meta={"announcement_type": announcement_type,
                                'current_page_index': 1})

    def parse_list(self, response):
        """Parse one listing page: yield a Request per announcement row and,
        unless the incremental-crawl cutoff was hit, a Request for the next
        page.
        """
        announcement_type = response.meta["announcement_type"]
        current_page_index = response.meta['current_page_index']

        sel = Selector(response)
        need_break = False

        items_node = sel.xpath("//div[@class='default_pgContainer']/table//tr")
        for tr_node in items_node:
            release_time = tr_node.xpath("./td[@align='center']/text()").extract_first()
            if release_time is None:
                # Row without a date cell (header / separator row) -- skip it
                # rather than crash the whole page on IndexError.
                continue
            release_time = release_time.replace(" ", "")

            # BaseSpider decides whether this date means "already crawled".
            need_break = self.check_if_need_break(release_time)
            if need_break:
                break

            a_link_node = tr_node.xpath("./td[@align='left']/a")
            if a_link_node:
                href = a_link_node.xpath("./@href").extract_first()
                announcement_title = a_link_node.xpath("./text()").extract_first()
                if not href:
                    continue
                yield Request(url=self.current_domain + href,
                              callback=self.parse_info,
                              meta={
                                  'release_time': release_time,
                                  'announcement_title': announcement_title,
                                  'announcement_type': announcement_type
                              })

        if not need_break:
            # Compute the total page count once (first page only) and pass it
            # along in meta so deeper pages don't re-parse it.
            if "total_page" not in response.meta:
                total_page = self.get_total_page(response.text)
            else:
                total_page = response.meta["total_page"]

            if current_page_index < total_page:
                base_url = response.url.split("?")[0]
                # The uid query parameter is fixed for this site's paginator.
                next_page_url = base_url + "?uid=52179&pageNum=" + str(current_page_index + 1)
                yield Request(url=next_page_url, callback=self.parse_list,
                              meta={
                                  "announcement_type": announcement_type,
                                  'current_page_index': current_page_index + 1,
                                  'total_page': total_page
                              })

    def get_total_page(self, html_text):
        """Return the number of listing pages (always >= 1) advertised by the
        paging metadata embedded in *html_text*.

        Falls back to 15 items per page and 0 records when the metadata is
        absent, matching the previous defaults.
        """
        match = self._PER_PAGE_RE.search(html_text)
        per_page = int(match.group(1)) if match else 15
        if per_page <= 0:
            # Defensive: avoid ZeroDivisionError on a pathological page.
            per_page = 15

        match = self._TOTAL_RECORD_RE.search(html_text)
        total = int(match.group(1)) if match else 0

        # Ceiling division.  The old formula int(total / per_page) + 1
        # over-counted by one page whenever total was an exact multiple of
        # per_page (e.g. 30 records / 15 per page gave 3 pages, not 2).
        return max(1, -(-total // per_page))







