import json
import re
import time
from datetime import datetime

import scrapy
from spidertools.utils.time_utils import get_current_date

from commonresources.spider_items.jiangsu.xuzhou.items import XuZhouShiGongGongZiYuanJianSheGongChengJiaoYiPingTaiItem
from spidertools.common_pipeline.base_item import convert_dict


class NanJingShiJianSheGongChengJiaoYiZhongXinSpider(scrapy.Spider):
    """
    南京市建设工程交易中心 (Nanjing Construction Engineering Trading Center).

    Entry point: http://www.njcein.com.cn/njxxnew/xmxx/004001/

    The site is an ASP.NET WebForms application: listing pages are paged by
    re-POSTing the page's hidden form fields (``__VIEWSTATE`` etc.) with the
    target page number in ``__EVENTARGUMENT``.  Only the "招标公告" category
    supports paging, so only that category is crawled.
    """
    name = 'NanJingShiJianSheGongChengJiaoYiZhongXin'
    name_zh = '南京市建设工程交易中心'
    province = "江苏"
    city = '南京'
    allowed_domains = ['njcein.com.cn']
    start_urls = ['http://www.njcein.com.cn/njxxnew/xmxx/004001/']

    # Rows per listing page; used to derive the total page count.
    PAGE_SIZE = 10

    def __init__(self, full_dose=False):
        """
        :param full_dose: crawl the full history when true; otherwise only
            items published on/after the current date are followed.  Accepts
            a real bool or the string passed by ``scrapy crawl -a``
            (``"true"``/``"false"``), since CLI arguments always arrive as
            strings and ``"false"`` would otherwise be truthy.
        """
        self.browser_cookie = {}
        self.page_count = -1
        self.convert_dict = convert_dict
        if isinstance(full_dose, str):
            # Normalise CLI string flags; bare ``-a full_dose=false`` must
            # actually disable full-dose mode.
            full_dose = full_dose.strip().lower() in ('1', 'true', 'yes')
        self.full_dose = bool(full_dose)
        super().__init__()

    def parse(self, response):
        """Kick off the paged "招标公告" listing crawl.

        The left-hand menu contains several categories, but only 招标公告
        exposes a working pager, so the others are not scheduled.
        """
        construction_type = "招标公告"
        url = 'http://www.njcein.com.cn/njxxnew/xmxx/zbgg/default.aspx'
        yield scrapy.Request(
            url=url,
            callback=self.handles_request2,
            meta={
                'construction_type': construction_type,
                'page': 1,
                'page_count': -1,   # resolved from the first listing page
                'need_break': False,
            },
        )

    def handles_request2(self, response):
        """Parse one listing page: yield detail requests and the next page."""
        # Capture the session cookie issued on the first (GET) response; the
        # WebForms POSTs must replay it so the server-side state matches.
        if "Cookie" not in response.request.headers:
            set_cookie = response.headers.get('Set-Cookie', b'')
            cookie = str(set_cookie).split(';')[0].split("'")[-1]
            response.meta["Cookie"] = cookie

        if response.meta['page_count'] == -1:
            # The total row count appears in the page as "共：N...条".
            match = re.search(r'共：.*?(\d+).*?条', response.text)
            total = int(match.group(1)) if match else 0
            # Ceiling division: the old ``total // 10 + 1`` scheduled a
            # trailing empty page whenever the count was a multiple of 10.
            response.meta['page_count'] = (total + self.PAGE_SIZE - 1) // self.PAGE_SIZE

        for row in response.xpath('//td[@id="tdcontent"]//tr'):
            item = dict()
            try:
                href = row.xpath('./td[last()-2]/a/@href').extract_first().strip()
                item['origin_url'] = 'http://www.njcein.com.cn' + href
                item['announcement_title'] = row.xpath('./td[last()-2]/a/text()').extract_first()
                item['release_time'] = row.xpath('./td[last()]/text()').extract_first().strip()
                item['project_area'] = row.xpath('./td[last()-1]/text()').extract_first().strip()
                item['info_type'] = row.xpath('./td[2]/text()').extract_first().strip()
                item['construction_type'] = response.meta['construction_type']
            except Exception:
                # Header/separator rows lack the expected cells; skip them
                # instead of aborting the whole page.
                self.logger.debug("skipping non-data listing row", exc_info=True)
                continue
            if not self.full_dose and item['release_time'] < get_current_date():
                # Incremental mode: rows are newest-first, so one stale row
                # means the rest of the site is stale too — stop paginating.
                response.meta['need_break'] = True
            else:
                yield scrapy.Request(
                    url=item['origin_url'],
                    meta=item,
                    dont_filter=True,
                    callback=self.handle_item_response,
                )

        # ``.get`` with a default: pagination responses built below do carry
        # ``need_break``, but defensively avoid a KeyError if meta is pruned.
        if not response.meta.get('need_break', False):
            page = response.meta['page']
            if page < response.meta['page_count']:
                page += 1
                # ``.get``: the Cookie meta key is only set when the request
                # had no Cookie header, so indexing could raise KeyError.
                cookie = response.meta.get("Cookie", "")
                # Scrapy's ``cookies=`` expects a mapping, not the raw
                # "name=value" header string.
                cookie_jar = dict([cookie.split('=', 1)]) if '=' in cookie else {}
                yield scrapy.FormRequest(
                    url=response.url,
                    formdata=self.fake_data(response, page),
                    dont_filter=True,
                    meta={
                        'construction_type': response.meta['construction_type'],
                        'page': page,
                        'page_count': response.meta['page_count'],
                        'Cookie': cookie,
                        'need_break': False,
                    },
                    headers={
                        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
                        "Accept-Encoding": "gzip, deflate",
                        "Accept-Language": "zh-CN,zh;q=0.9",
                        "Cache-Control": "max-age=0",
                        "Connection": "keep-alive",
                        "Content-Type": "application/x-www-form-urlencoded",
                        # NOTE(review): original header said www.jszb.com.cn —
                        # a copy-paste from another spider; requests go to
                        # njcein.com.cn, so send the matching Host.
                        "Host": "www.njcein.com.cn",
                        "Upgrade-Insecure-Requests": "1",
                        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36",
                    },
                    cookies=cookie_jar,
                )

    def fake_data(self, response, page):
        """Build the WebForms postback payload for a listing page.

        Collects every hidden ``<input>`` (``__VIEWSTATE``,
        ``__EVENTVALIDATION``, ...) from *response* and sets the pager's
        target page number.

        :param response: the current listing-page response.
        :param page: 1-based page number to request.
        :return: dict suitable for ``FormRequest(formdata=...)``.
        """
        form_data = {}
        for hidden in response.xpath('//input[@type="hidden"]'):
            name = hidden.xpath('./@name').extract_first()
            if not name:
                # A nameless input cannot be posted; ignore it.
                continue
            value = hidden.xpath('./@value').extract_first()
            form_data[name] = value or ""
        # The pager postback passes the page number via __EVENTARGUMENT.
        form_data['__EVENTARGUMENT'] = str(page)
        return form_data

    def handle_item_response(self, response):
        """Assemble the final item from a detail page.

        Listing-row metadata travels in ``request.meta``; the raw detail
        HTML is stored unparsed (``is_parsed=0``) for downstream pipelines.
        """
        item = XuZhouShiGongGongZiYuanJianSheGongChengJiaoYiPingTaiItem()
        meta = response.request.meta
        item['construction_type'] = meta['construction_type']
        item['release_time'] = meta['release_time']
        item['origin_url'] = meta['origin_url']
        item['announcement_title'] = meta['announcement_title']
        item['html'] = response.text
        item['source_type'] = self.name_zh
        item['is_parsed'] = 0
        item['province'] = self.province
        item['city'] = self.city
        yield item
