import json
import re
import time
from datetime import datetime

import scrapy
from lxml import etree
from spidertools.common_pipeline.base_item import convert_dict
from spidertools.utils.time_utils import get_current_date

from commonresources.spider_items.jiangsu.yangzhou.items import YangZhouShiGongGongZiYuanJiaoYiFuWuPingTaiItem


class yangzhoushigonggongziyuanjiaoyifuwupingtaiSpider(scrapy.Spider):
    """Spider for the Yangzhou Public Resource Trading Service Platform.

    扬州市公共资源交易服务平台
    http://ggzyjyzx.yangzhou.gov.cn/

    Crawl flow:
      1. ``start_requests`` fetches the listing index page.
      2. ``get_classify`` parses the zTree ``zNodes`` javascript literal on
         that page into per-category listing URLs plus category labels.
      3. ``handle_response`` walks each category's paginated listing and
         yields one detail request per announcement.
      4. ``parse_html`` wraps the detail page into the pipeline item.
    """
    name = 'YangZhouShiGongGongZiYuanJiaoYiFuWuPingTai'
    name_zh = "扬州市公共资源交易服务平台"
    province = "江苏"
    city = '扬州'
    allowed_domains = ['ggzyjyzx.yangzhou.gov.cn']

    def __init__(self, full_dose=False):
        """
        :param full_dose: crawl everything when truthy; by default only
            announcements released today are collected (incremental mode).
            Scrapy delivers ``-a full_dose=...`` as a string, so string
            values are interpreted explicitly rather than by truthiness
            (otherwise ``-a full_dose=False`` would enable full crawling).
        """
        if isinstance(full_dose, str):
            full_dose = full_dose.strip().lower() in ('true', '1', 'yes')
        self.browser_cookie = {}
        self.convert_dict = convert_dict
        self.full_dose = full_dose
        super().__init__()

    def parse(self, response):
        # Required by scrapy.Spider; all real parsing happens in the
        # explicitly-named callbacks below.
        pass

    def start_requests(self):
        # Entry point: fetch the listing index whose embedded javascript
        # tree (zNodes) enumerates every category listing URL.
        yield scrapy.Request(
            url='http://ggzyjyzx.yangzhou.gov.cn/qtyy/ggzyjyzx/jyxx_listn.html?shownode=0',
            headers=self.get_headers(),
            callback=self.start_requests2,
        )

    def get_classify(self, res):
        """Extract category metadata from the page's ``var zNodes = [...]`` JS.

        The zNodes entries form a tree keyed by ``id``/``pId``: 2-digit ids
        (plus the special '110') are second-level nodes whose parent name is
        the construction type; 3-digit ids are leaves whose parent name is
        the info type and grandparent name the construction type.

        :param res: response for the listing index page.
        :return: list of dicts, each with at least ``url``, ``name``,
            ``construction_type`` and (for leaf nodes) ``info_type``.
        """
        # Strip // comments and all whitespace from the JS array literal,
        # then pull out each {...} object body.
        raw = re.findall(r'^        var zNodes =(\[.*?]);$', res.text,
                         re.MULTILINE | re.DOTALL)[0]
        objs = re.findall(r'{(.*?)}', re.sub(r'\s', '', re.sub(r'//.*\n', '', raw)))

        # Poor-man's parse of the JS object literals into dicts, indexed by id.
        trade_info = {}
        for obj in objs:
            key = ""
            obj_dict = dict()
            for pair in obj.split(','):
                k, v = pair.split(':')
                if k == 'id':
                    key = v
                if k == 'url':
                    # URL values look like "'/path'" — unquote and absolutize.
                    v = "http://ggzyjyzx.yangzhou.gov.cn" + v.split("'")[1]
                obj_dict[k] = v
            trade_info[key] = obj_dict

        # Resolve construction_type / info_type from the parent chain.
        for k, v in trade_info.items():
            if (v['pId'] in trade_info and len(k) == 2) or k == '110':
                v['construction_type'] = trade_info[v['pId']]['name']
            elif v['pId'] in trade_info and len(k) == 3:
                v['info_type'] = trade_info[v['pId']]['name']
                # Parent has children, so it is a grouping node, not a listing.
                trade_info[v['pId']]['no_use'] = 1
                v['construction_type'] = trade_info[trade_info[v['pId']]['pId']]['name']

        # Keep only leaf listing nodes; '73' and 1-digit ids are structural.
        classify = []
        for k, v in trade_info.items():
            if len(k) != 1 and 'no_use' not in v and k != '73':
                classify.append(v)
        return classify

    def start_requests2(self, response):
        """Issue the first listing-page request for every category."""
        for obj in self.get_classify(response):
            # Cache-busting timestamp suffix, as the site's own JS does.
            url = obj['url'] + str(int(time.time()))
            construction_type = obj['construction_type']
            if 'info_type' in obj:
                info_type = obj['info_type']
                detail_info = obj['name']
            else:
                info_type = obj['name']
                detail_info = ''
            yield scrapy.Request(
                url=url,
                headers=self.get_headers(),
                callback=self.handle_response,
                meta={
                    "url": url,
                    "page": 1,
                    # -1 marks "total page count not yet determined".
                    "page_count": -1,
                    "construction_type": construction_type,
                    "info_type": info_type,
                    "detail_info": detail_info,
                    "need_break": False,
                },
                dont_filter=True,
            )

    def handle_response(self, response):
        """Parse one listing page: yield detail requests, then the next page.

        ``meta`` carries pagination state (``page``/``page_count``), the
        category labels resolved by :meth:`get_classify`, and ``need_break``,
        which stops pagination once an item older than today appears in
        incremental mode.
        """
        meta = response.request.meta
        html = etree.HTML(response.text)
        if meta['page_count'] == -1:
            # First page of this category: read the total page count from the
            # "last page" pagination link, e.g. href="...submitPage(12,...)".
            try:
                meta['page_count'] = int(
                    re.findall(r'submitPage\((.*?),',
                               html.xpath('//span[last()-1]/a/@href')[0])[0])
            except Exception:
                # No pagination widget found: single-page listing.
                # BUGFIX: this fallback used to live in an ``else`` clause,
                # which overwrote a successfully parsed count with 1 and left
                # -1 on failure — so pagination never actually ran.
                meta['page_count'] = 1
        for li in html.xpath('//ul/li'):
            release_time = li.xpath('./span/text()')[0]
            if not self.full_dose and release_time != get_current_date():
                # Incremental mode: an item not released today means we have
                # caught up — skip it and stop requesting further pages.
                meta['need_break'] = True
                continue
            item = {
                'release_time': release_time,
                'announcement_title': li.xpath('./a/text()')[0],
                'origin_url': "http://ggzyjyzx.yangzhou.gov.cn" + li.xpath('./a/@href')[0],
                'construction_type': meta['construction_type'],
                'info_type': meta['info_type'],
                'detail_info': meta['detail_info'],
            }
            yield scrapy.Request(url=item['origin_url'],
                                 dont_filter=True,
                                 headers=self.get_headers(),
                                 callback=self.parse_html,
                                 meta=item)
        if not meta['need_break']:
            page = meta['page']
            page_count = meta['page_count']
            if page < page_count:
                page += 1
                yield scrapy.Request(
                    url=meta['url'] + f"&currentPage={page}",
                    headers=self.get_headers(),
                    callback=self.handle_response,
                    meta={
                        'url': meta['url'],
                        'need_break': False,
                        "construction_type": meta['construction_type'],
                        "info_type": meta['info_type'],
                        "detail_info": meta['detail_info'],
                        'page': page,
                        'page_count': page_count,
                    },
                    dont_filter=True,
                )

    def get_headers(self):
        """Return the browser-like request headers expected by the site."""
        return {
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
                "Accept-Encoding": "gzip, deflate",
                "Accept-Language": "zh-CN,zh;q=0.9",
                "Connection": "keep-alive",
                "Host": "ggzyjyzx.yangzhou.gov.cn",
                "Upgrade-Insecure-Requests": "1",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36",
            }

    def parse_html(self, response):
        """Wrap an announcement detail page into the pipeline item.

        All listing-level fields travel in ``response.meta``; the raw HTML is
        stored unparsed (``is_parsed = 0``) for downstream processing.
        """
        item = YangZhouShiGongGongZiYuanJiaoYiFuWuPingTaiItem()
        item['html'] = response.text
        item["announcement_title"] = response.meta['announcement_title']
        item["release_time"] = response.meta["release_time"]
        item['origin_url'] = response.meta['origin_url']
        # Category names arrive still wrapped in the JS literal's quotes.
        item["construction_type"] = response.meta['construction_type'].strip('"')
        item['info_type'] = response.meta['info_type'].strip('"')
        item['detail_info'] = response.meta['detail_info'].strip('"')
        item['is_parsed'] = 0
        item['source_type'] = self.name_zh
        item['province'] = self.province
        item['city'] = self.city
        yield item
