# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from spidertools.common_pipeline.base_item import BaseItem
from spidertools.common_pipeline.base_item import convert_dict
from spidertools.utils.time_utils import get_current_date
from scrapy.selector import Selector
import os
import re,json

from urllib.parse import urlencode


import json
import requests


class HuaiAnShiZhengFuCaiGouWangSpider(scrapy.Spider):
    '''
    淮安市政府采购网 (Huai'an Municipal Government Procurement) spider.
    Site: http://zfcgzx.huaian.gov.cn/

    Walks every announcement category found on the start page, pages through
    each category's static ``list_N.html`` pages, and yields one ``BaseItem``
    per announcement detail page.  Static list paging stops at page 50
    (site limit — see ``parse_announ_list``), after which a JSON search API
    on 222.184.79.16 is used to fetch the remaining items.
    '''
    name = 'HuaiAnShiZhengFuCaiGouWang'
    name_zh = '淮安市政府采购网'
    province = "江苏"
    city = '淮安'
    allowed_domains = ['zfcgzx.huaian.gov.cn', '222.184.79.16']
    current_domain = 'http://zfcgzx.huaian.gov.cn/'
    start_urls = ['http://zfcgzx.huaian.gov.cn/zbcg/cggg/list.html']

    # Matches the pager JS call embedded in list pages, e.g.
    # nsetpage(3, ..., 'typeid') — captures the raw argument list.
    # Compiled once (raw string: '\(' in a plain literal is a deprecated escape).
    _PAGER_RE = re.compile(r'nsetpage\((.*?)\)')

    def __init__(self, full_dose=False):
        '''
        :param full_dose: crawl the full history when true; otherwise only
            items released today (incremental run).  Accepts a bool, or a
            string as passed by ``scrapy crawl -a full_dose=...`` where
            "true"/"1"/"yes" (case-insensitive) mean true.
        '''
        # Scrapy delivers -a arguments as strings; the string "false" is
        # truthy, so normalise strings explicitly instead of trusting bool().
        if isinstance(full_dose, str):
            full_dose = full_dose.strip().lower() in ('true', '1', 'yes')
        self.browser_cookie = {}
        self.page_count = -1
        self.convert_dict = convert_dict
        self.full_dose = bool(full_dose)
        super().__init__()

    def close(self, spider):
        # Nothing to release: this spider starts no browser/driver.
        pass

    def check_if_need_break(self, item_day, full_dose):
        '''
        Decide whether list paging should stop at the current item.

        :param item_day: release-date string of the current list item.
        :param full_dose: when true, never stop (full crawl).
        :return: True when an incremental crawl reaches an item that was not
            released today, i.e. paging should stop.
        '''
        if full_dose:
            return False
        return get_current_date() != item_day

    def parse(self, response):
        '''Discover the category tabs on the start page and fan out one
        list-page request per announcement category.'''
        sel = Selector(response)
        for node in sel.xpath("//div[@class='list-left']//div[@class='left-qh']/a"):
            announcement_type = node.xpath("./text()").extract()[0]
            url = node.xpath("./@href").extract()[0]
            yield Request(self.current_domain + url,
                          callback=self.parse_announ_list,
                          meta={"announcement_type": announcement_type})

    def parse_announ_list(self, response):
        '''
        Parse one static list page: yield a detail request per announcement
        and, unless the incremental cut-off was hit, a request for the next
        list page.  At page 50 the JSON search API takes over.

        :param response: list page; ``meta['announcement_type']`` carries the
            category label captured in :meth:`parse`.
        '''
        announcement_type = response.meta['announcement_type']
        sel = Selector(response)

        need_break = False
        for tr_node in sel.xpath("//div[@class='list-lb']/ul/li"):
            time_node = tr_node.xpath("./span[@class='lb-time']")
            release_time = time_node.xpath('./text()').extract()[0] if time_node else ""

            need_break = self.check_if_need_break(release_time, self.full_dose)
            if need_break:
                break

            link_node = tr_node.xpath('./a')
            if link_node:
                announcement_url = self.current_domain + link_node[0].xpath("./@href").extract()[0]
                announcement_title = link_node[0].xpath("./@title").extract()[0]
                yield Request(url=announcement_url,
                              callback=self.parse_info,
                              meta={"announcement_type": announcement_type,
                                    'announcement_title': announcement_title,
                                    'release_time': release_time})

        if need_break:
            return

        match_result = self._PAGER_RE.search(response.text)
        if not match_result:
            return

        # The pager call carries 6 comma-separated args; we need the current
        # page number (first) and the category/layer id (last).
        page_index, _, _, _, _, type_id = match_result[1].split(",")
        page_index = int(page_index)
        current_url = response.url

        # Page 1 is "list.html"; subsequent pages are "list_<n>.html".
        if 'list.html' in current_url:
            next_url = current_url.replace("list.html", "list_%s.html" % (page_index + 1))
        else:
            next_url = current_url.replace("list_%s.html" % page_index,
                                           "list_%s.html" % (page_index + 1))
        yield Request(url=next_url, callback=self.parse_announ_list,
                      meta={"announcement_type": announcement_type})

        # Static list pages appear to stop at 50 (site limit — TODO confirm);
        # continue through the JSON search API from there.
        if page_index == 50:
            real_url = self.make_more_url(type_id, page_index)
            yield Request(real_url, callback=self.parse_more_items,
                          meta={'pageNo': page_index, 'type_id': type_id,
                                "announcement_type": announcement_type})

    def parse_more_items(self, response):
        '''
        Parse one page of the JSON search API (used past static page 50):
        yield a detail request per hit, then request the next API page until
        the reported hit count ("numfound") is exhausted.

        NOTE(review): the incremental (full_dose) cut-off is not applied on
        this API path — this matches the original behavior.
        '''
        pageNo = int(response.meta['pageNo'])
        type_id = response.meta['type_id']
        announcement_type = response.meta['announcement_type']

        json_obj = json.loads(response.text)
        if not json_obj:
            return

        for dict_ in json_obj['list']:
            # API timestamps look like "YYYY-MM-DD HH:MM:SS"; keep the date.
            release_time = dict_['release_time'].split(" ")[0]
            item_url = dict_['domain'] + dict_['path']
            yield Request(url=item_url, callback=self.parse_info,
                          meta={"announcement_type": announcement_type,
                                "announcement_title": dict_['title'],
                                'release_time': release_time})

        # Ceiling division by the page size (10, see make_more_url).  The
        # original `int(numfound/10) + 1` requested one extra empty page
        # whenever numfound was an exact multiple of 10.
        total_num = (json_obj["numfound"] + 9) // 10
        if pageNo < total_num:
            real_url = self.make_more_url(type_id, pageNo + 1)
            yield Request(real_url, callback=self.parse_more_items,
                          meta={'pageNo': pageNo + 1, 'type_id': type_id,
                                "announcement_type": announcement_type})

    def make_more_url(self, type_id, page_index):
        '''
        Build a query URL for the JSON search API backing the list pages.

        :param type_id: category "layer" id captured from the pager JS; may
            arrive wrapped in single quotes, which are stripped here.
        :param page_index: page number to request.
        :return: fully URL-encoded API URL returning up to 10 hits.
        '''
        params = {
            "q": "keyword:*",
            "ename": "core",
            "pageNo": page_index,
            "hl": "false",
            "fq": '["layer:%s"]' % type_id.replace("'", ""),
            "rows": "10",
            "sort": "order_no:desc"
        }
        url_prefix = 'http://222.184.79.16:8080//api/query.do?'
        return url_prefix + urlencode(params)

    def parse_info(self, response):
        '''Build the BaseItem for one announcement detail page.  The raw HTML
        is stored unparsed (is_parsed=0) for a downstream pipeline.'''
        item = BaseItem()
        item["announcement_title"] = response.meta['announcement_title']
        item["release_time"] = response.meta["release_time"]
        item['announcement_type'] = response.meta["announcement_type"]
        item['html'] = response.text
        item['origin_url'] = response.url
        item['is_parsed'] = 0
        item['source_type'] = self.name_zh
        item['province'] = self.province
        item['city'] = self.city

        yield item




if __name__ == '__main__':
    # Ad-hoc manual probe of the fallback JSON search API on 222.184.79.16
    # (performs a live HTTP request; for developer eyeballing only).
    query = {
        "q": "keyword:*",
        "ename": "core",
        "pageNo": "53",
        "hl": "false",
        "fq": '["layer:01520002001600010000*"]',
        "rows": "10",
        "sort": "order_no:desc"
    }
    encoded = urlencode(query)
    print(encoded)
    real_url = 'http://222.184.79.16:8080//api/query.do?' + encoded
    import requests
    print(requests.get(real_url).json())
    # Known-good encoded query string, for comparison against `encoded`.
    print("q=keyword%3A*&ename=core&pageNo=53&hl=false&fq=%5B%22layer%3A01520002001600010000*%22%5D&rows=10&sort=order_no%3Adesc")










