import json
import re
import time

import scrapy
from spidertools.utils.time_utils import get_current_date

from commonresources.spider_items.jiangsu.xuzhou.items import XuZhouGongGongZiYuanJiaoYiPingTaiItem
from spidertools.common_pipeline.base_item import convert_dict


class WuXiShiGongGongZiYuanJiaoYiZhongXinSpider(scrapy.Spider):
    """
    无锡市公共资源交易中心 (Wuxi Public Resource Trading Center).

    Portal: http://xzfw.wuxi.gov.cn/ztzl/wxsggzyjyzxzl/index.shtml

    Crawl flow:
      1. ``start_requests``  — fetch the construction-project listing page.
      2. ``handle_response`` — scrape the inline-JS ajax calls to discover one
         POST endpoint + ``chanId`` per trade category, then POST page 1.
      3. ``handle_response2`` — parse the JSON listing, yield one detail
         request per announcement, and paginate until ``list`` comes back
         empty.
      4. ``handle_item``     — wrap the detail page HTML into an item.

    NOTE: categories hosted on other domains (departmental/decentralized
    procurement, drugs & consumables, rural property rights) carry no trade
    info on this portal and are intentionally not crawled.
    """
    name = 'WuXiShiGongGongZiYuanJiaoYiZhongXin'
    name_zh = '无锡市公共资源交易中心'
    province = "江苏"
    city = '无锡'
    allowed_domains = ['xzfw.wuxi.gov.cn']

    def __init__(self, full_dose=False, *args, **kwargs):
        """
        :param full_dose: whether to crawl the full history (default False;
            currently stored but not yet consulted by the pagination logic).
        """
        # Forward Scrapy's standard spider kwargs instead of dropping them,
        # so `scrapy crawl -a ...` / custom settings keep working.
        super().__init__(*args, **kwargs)
        self.browser_cookie = {}
        self.page_count = -1
        self.convert_dict = convert_dict
        self.full_dose = full_dose

    def get_headers(self):
        """Return browser-like request headers accepted by the portal."""
        return {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Connection": "keep-alive",
            "Host": "xzfw.wuxi.gov.cn",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36",
        }

    def start_requests(self):
        """Kick off the crawl at the construction-project category page."""
        yield scrapy.Request(
            url="http://xzfw.wuxi.gov.cn/ztzl/wxsggzyjyzxzl/jyxx/jsgc/index.shtml",
            headers=self.get_headers(),
            callback=self.handle_response,
        )

    def handle_response(self, response):
        """Discover one (category title, POST endpoint, chanId) triple per
        trade category from the page's inline JavaScript, then request the
        first JSON listing page of each.
        """
        # Each category is rendered by an inline `$.ajax({type:"post", ...})`
        # call preceded by a `/** 标题 **/` banner comment; capture all three
        # pieces in one pass.
        triples = re.findall(
            r'/\*\*(.*?)\*\*/\s+.{2}ajax\(\{\s+type : "post",\s+url:"(.*?)",\s+data: \{\s+\'chanId\':(.*?),',
            response.text)
        for raw_title, rel_url, chan_id in triples:
            # Strip the `*` decoration and any surrounding whitespace from
            # the banner title (the old `.strip(r'\*')` left spaces behind).
            construction_type = raw_title.strip('\\*').strip()
            origin_url_type = "http://xzfw.wuxi.gov.cn" + rel_url
            self.logger.debug(
                "category=%s endpoint=%s chanId=%s",
                construction_type, origin_url_type, chan_id)
            yield scrapy.FormRequest(
                url=origin_url_type,
                callback=self.handle_response2,
                formdata=self.get_formdata(chan_id, 1),
                dont_filter=True,
                meta={
                    'construction_type': construction_type,
                    'origin_url_type': origin_url_type,
                    'post_data': chan_id,
                    'page': 1,
                    'need_break': False,
                }
            )

    def get_formdata(self, chanId, page):
        """Build the POST body for one listing page of channel ``chanId``."""
        return {
            "chanId": f"{chanId}",
            "jyly": "",
            "pageIndex": f"{page}",
            "pageSize": "20",
        }

    def handle_response2(self, response):
        """Parse one JSON listing page: yield a detail request per
        announcement and a FormRequest for the next page.

        Pagination terminates naturally: an out-of-range page returns an
        empty ``list`` and no further request is issued.
        """
        payload = response.json()
        self.logger.debug(payload)
        if not payload['list']:
            return
        meta = response.request.meta
        for entry in payload['list']:
            item = {
                'release_time': entry['writeTimeString'],
                'announcement_title': entry['title'],
                'construction_type': meta['construction_type'],
                'detail_info': meta.get('detail_info', ""),
                'origin_url': "http://xzfw.wuxi.gov.cn" + entry['url'],
                # NB: 'attriBute' is the upstream API's own key spelling.
                'html_append': entry['attriBute'][0:10],
                'project_area': entry['jyly'],
            }
            self.logger.debug(item)
            yield scrapy.Request(
                url=item['origin_url'],
                callback=self.handle_item,
                meta=item,
            )
        if not meta["need_break"]:
            next_page = meta['page'] + 1
            chan_id = meta['post_data']
            yield scrapy.FormRequest(
                url=meta['origin_url_type'],
                callback=self.handle_response2,
                formdata=self.get_formdata(chan_id, next_page),
                dont_filter=True,
                meta={
                    'construction_type': meta['construction_type'],
                    'origin_url_type': meta['origin_url_type'],
                    'post_data': chan_id,
                    'page': next_page,
                    'need_break': False,
                }
            )

    def handle_item(self, response):
        """Assemble the final item from the detail page plus listing meta."""
        item = XuZhouGongGongZiYuanJiaoYiPingTaiItem()
        meta = response.request.meta
        item['project_area'] = meta['project_area']
        item['construction_type'] = meta['construction_type']
        item['release_time'] = meta['release_time']
        item['origin_url'] = meta['origin_url']
        item['announcement_title'] = meta['announcement_title']
        item['html'] = response.text
        item['source_type'] = self.name_zh
        item['is_parsed'] = 0
        item['province'] = self.province
        item['city'] = self.city
        yield item
