# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from spidertools.common_pipeline.base_item import BaseItem
from spidertools.common_pipeline.base_item import convert_dict
from spidertools.utils.time_utils import get_current_date
from scrapy.selector import Selector
import os


import json
import requests


class LianYunGangShiZhengFuCaiGouJiaoYiPingTaiSpider(scrapy.Spider):
    '''
    Spider for 连云港市政府采购交易平台 (Lianyungang Municipal Government
    Procurement Trading Platform) http://www.lygzfcg.gov.cn/lygzfcg/

    Crawls the announcement-category links from the start page, walks each
    paginated announcement list, and yields one BaseItem per announcement
    detail page. In incremental mode (the default) it stops paginating as
    soon as it sees an item whose release date is not today.
    '''
    name = 'LianYunGangShiZhengFuCaiGouJiaoYiPingTai'
    name_zh = '连云港市政府采购交易平台'
    province = "江苏"
    city = '连云港'
    allowed_domains = ['lygzfcg.gov.cn']
    current_domain = 'http://www.lygzfcg.gov.cn'
    start_urls = ['http://www.lygzfcg.gov.cn/lygzfcg/cgxx/aboutjyxx.html']

    def __init__(self, full_dose=False):
        '''
        :param full_dose: whether to crawl the full history; defaults to
            False (incremental: stop at the first item not released today).
            Accepts a bool, or a string such as "true"/"false" when passed
            on the command line via ``scrapy crawl -a full_dose=...``.
        '''
        # Scrapy passes -a arguments as strings, so a literal "false" would
        # otherwise be truthy and silently force a full crawl.
        if isinstance(full_dose, str):
            full_dose = full_dose.strip().lower() in ('true', '1', 'yes')
        self.browser_cookie = {}
        self.page_count = -1
        self.convert_dict = convert_dict
        self.full_dose = full_dose
        super().__init__()

    def close(self, spider):
        # No resources to release; kept as an explicit no-op shutdown hook.
        pass

    def check_if_need_break(self, item_day, full_dose):
        '''
        Decide whether the incremental crawl should stop at this item.

        :param item_day: release-date string of the current item.
        :param full_dose: True for a full crawl (never break).
        :return: True when crawling incrementally and the item was not
            released today.
        '''
        if full_dose:
            return False
        return get_current_date() != item_day

    def parse(self, response):
        '''Extract announcement-category links and schedule each list page.'''
        sel = Selector(response)
        category_links = sel.xpath(
            "//div[@class='ewb-menu-bd']//h3[@class='wb-tree-node']//a")
        for node in category_links:
            announcement_type = node.xpath("./text()").extract()[0]
            url = node.xpath("./@href").extract()[0]
            yield Request(
                self.current_domain + url,
                callback=self.parse_announ_list,
                meta={"announcement_type": announcement_type})

    def parse_announ_list(self, response):
        '''
        Parse one announcement list page: schedule one detail request per
        announcement row, then follow the next-page link unless the
        incremental stop condition was hit.

        :param response: list page; ``meta['announcement_type']`` carries
            the category label forward.
        '''
        announcement_type = response.meta['announcement_type']
        sel = Selector(response)

        need_break = False
        # NOTE(review): the class value 'ewb-info-items.py' looks like a
        # typo but is kept verbatim -- confirm against the live page markup.
        rows = sel.xpath("//div[@id='jt']/ul[@class='ewb-info-items.py']/li")
        for tr_node in rows:
            date_node = tr_node.xpath("./span[@class='ewb-date']")
            if date_node:
                announcement_release_time = date_node.xpath('./text()').extract()[0]
                # Strip layout whitespace and the surrounding brackets.
                for ch in ("\r", "\n", "\t", "[", "]"):
                    announcement_release_time = announcement_release_time.replace(ch, "")
            else:
                announcement_release_time = ""

            need_break = self.check_if_need_break(announcement_release_time, self.full_dose)
            if need_break:
                break

            link_node = tr_node.xpath('./div[@class="ewb-info-fors"]/a')
            if len(link_node):
                announcement_url = self.current_domain + link_node[0].xpath("./@href").extract()[0]
                announcement_title = link_node[0].xpath("./@title").extract()[0]
            else:
                announcement_url = ""
                announcement_title = ""

            if announcement_url != "":
                yield Request(
                    url=announcement_url,
                    callback=self.parse_info,
                    meta={"announcement_type": announcement_type,
                          'announcement_title': announcement_title,
                          'announcement_release_time': announcement_release_time})

        # Only paginate when the incremental cutoff was not reached.
        if not need_break:
            next_page_node = sel.xpath(
                "//div[@id='jt']/div[@class='ewb-page']//li[@class='nextlink']/a[@class='alink']")
            if next_page_node:
                next_url = self.current_domain + next_page_node[0].xpath("./@href").extract()[0]
                yield Request(url=next_url,
                              callback=self.parse_announ_list,
                              meta={"announcement_type": announcement_type})

    def parse_info(self, response):
        '''Build a BaseItem from an announcement detail page and yield it.'''
        item = BaseItem()
        item["announcement_title"] = response.meta['announcement_title']
        item["release_time"] = response.meta["announcement_release_time"]
        item['announcement_type'] = response.meta["announcement_type"]
        item['html'] = response.text
        item['origin_url'] = response.url
        item['is_parsed'] = 0  # downstream-pipeline flag: raw, not yet parsed
        item['source_type'] = self.name_zh
        item['province'] = self.province
        item['city'] = self.city
        yield item















