import json
import re
import time
from datetime import datetime

import scrapy
from spidertools.utils.time_utils import get_current_date

from commonresources.spider_items.jiangsu.xuzhou.items import XuZhouShiGongGongZiYuanJianSheGongChengJiaoYiPingTaiItem
from spidertools.common_pipeline.base_item import convert_dict


class YangZhouGongChengJianSheXinXiWangSpider(scrapy.Spider):
    """
    扬州工程建设信息网 (Yangzhou Construction Engineering Information Network)
    http://www.yzcetc.com/Yzcetc/default.aspx

    Crawl flow:
        parse               entry page -> one request per construction-type link
        handles_request2    category page -> one request per info-type listing
        handles_request3    listing page -> detail requests + ASP.NET postback pagination
        handle_item_response detail page -> yields the final item

    NOTE(review): several URLs below still point at www.xzcet.com (the Xuzhou
    site this spider appears to be derived from) and the yielded item type is
    the Xuzhou item class — confirm the intended target before deploying.
    """
    name = 'YangZhouGongChengJianSheXinXiWang'
    name_zh = '扬州工程建设信息网'
    province = "江苏"
    city = '扬州'
    allowed_domains = ['www.yzcetc.com']
    start_urls = ['http://www.yzcetc.com/yzcetc/YW_Info/ZhongBiaoHXRGS/MoreHXRGSList_YZ_New.aspx?CategoryNum=003']

    # Browser-like headers shared by all list-page requests.  The original code
    # hard-coded Host headers for unrelated domains (www.xzcet.com,
    # www.jszb.com.cn); Host is now left for Scrapy to fill in per request.
    default_headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.80 Safari/537.36 Edg/86.0.622.48",
    }

    def __init__(self, full_dose=False):
        """
        :param full_dose: crawl the full history when truthy; by default only
            items released today are followed.  Accepts a bool or a string such
            as "True"/"false" (Scrapy ``-a`` arguments always arrive as str,
            so the raw value would otherwise be truthy even for "false").
        """
        self.browser_cookie = {}   # name -> value cookies captured on the entry page
        self.page_count = -1
        self.convert_dict = convert_dict
        self.full_dose = str(full_dose).lower() in ('true', '1')
        super().__init__()

    def parse(self, response):
        """Entry page: record the session cookies and follow every
        construction-type link from the left-hand category menu.
        """
        # Each Set-Cookie value looks like b'NAME=VALUE; path=/; ...'.
        # getlist() replaces the old fragile regex over str(response.headers).
        for raw in response.headers.getlist('Set-Cookie'):
            first = raw.decode('utf-8', 'ignore').split(';')[0]
            if '=' in first:
                cookie_name, _, cookie_value = first.partition('=')
                self.browser_cookie[cookie_name] = cookie_value
        for link in response.xpath('//span[@id="LeftMenu1_Span_Category"]//tr/td/a'):
            url = "http://www.yzcetc.com/" + link.xpath('./@href').extract_first()
            construction_type = link.xpath('./font/text()').extract_first()
            # The follow-up request was commented out in the original, leaving
            # the spider dead after the entry page; re-enabled here.
            yield scrapy.Request(
                url=url,
                callback=self.handles_request2,
                headers=self.default_headers,
                meta={'construction_type': construction_type},
                dont_filter=True,
            )

    def handles_request2(self, response):
        """Category page: follow every info-type link in the left column."""
        for link in response.xpath("//table//tr/td[@class='classleftbg']/a"):
            category_num = link.xpath('./@href').extract_first().split('=')[-1]
            info_type = link.xpath('./text()').extract_first()
            # NOTE(review): this URL targets www.xzcet.com although
            # allowed_domains only contains www.yzcetc.com — confirm.
            yield scrapy.Request(
                url="http://www.xzcet.com/xzwebnew/ztbpages/MoreinfoZbgg.aspx?categoryNum=" + category_num,
                headers=self.default_headers,
                meta={
                    'construction_type': response.request.meta['construction_type'],
                    'info_type': info_type,
                    'need_break': False,
                    'page': 1,
                    'page_count': -1,
                },
                callback=self.handles_request3,
                dont_filter=True,
            )

    def handles_request3(self, response):
        """Listing page: yield one detail request per row, then paginate via
        the ASP.NET postback form.

        In incremental mode (``full_dose`` falsy) pagination stops at the
        first row whose release date is not today's.
        """
        if "Cookie" not in response.meta:
            # First hit of this listing: remember the session cookie, e.g.
            # b'ASP.NET_SessionId=vzfjaq550k1mct45bsvrmi45; path=/; HttpOnly'.
            set_cookie = response.headers.get('Set-Cookie', b'')
            response.meta["Cookie"] = str(set_cookie).split(';')[0].split("'")[-1]
        if response.meta['page_count'] == -1:
            pages = re.findall(r'总页数：<font color="blue"><b>(\d+?)</b>', response.text)
            # Fall back to a single page when the total-page marker is absent
            # (the original indexed [0] unconditionally and could IndexError).
            response.meta['page_count'] = int(pages[0]) if pages else 1

        for row in response.xpath('//td[@id="MoreinfoListJyxx1_tdcontent"]//tr'):
            item = dict()
            try:
                item['origin_url'] = 'http://www.xzcet.com/' + row.xpath('./td[2]/a/@href').extract_first()
                item['release_time'] = row.xpath('./td[last()]/text()').extract_first().strip()
                item['detail_info'] = row.xpath('./td[last()-1]/text()').extract_first()
                item['announcement_title'] = row.xpath('./td[2]/a/text()').extract_first()
                item['info_type'] = response.meta['info_type']
                item['construction_type'] = response.meta['construction_type']
                # The list column shows only "MM-DD", hence the [5:] slice of
                # get_current_date() ("YYYY-MM-DD" assumed — TODO confirm).
                if not self.full_dose and item['release_time'] != get_current_date()[5:]:
                    response.meta['need_break'] = True
                else:
                    yield scrapy.Request(
                        url=item['origin_url'],
                        meta=item,
                        dont_filter=True,
                        callback=self.handle_item_response,
                    )
            except Exception as e:
                # Header/separator rows do not match the column layout; skip.
                self.logger.debug("skipping list row: %s", e)
                continue

        if not response.meta['need_break']:
            page = response.meta['page']
            if page < response.meta['page_count']:
                page += 1
                post_headers = dict(self.default_headers)
                post_headers["Content-Type"] = "application/x-www-form-urlencoded"
                cookie_str = response.meta["Cookie"]
                # Scrapy expects a dict for ``cookies`` — the original passed
                # the raw "name=value" string, which CookiesMiddleware cannot
                # consume.
                cookie_dict = dict(
                    pair.split('=', 1) for pair in cookie_str.split(';') if '=' in pair
                )
                yield scrapy.FormRequest(
                    url=response.url,
                    formdata=self.fake_data(response, page),
                    dont_filter=True,
                    meta={
                        'construction_type': response.meta['construction_type'],
                        # 'info_type' and 'need_break' were missing in the
                        # original, causing KeyErrors when parsing page 2+.
                        'info_type': response.meta['info_type'],
                        'need_break': False,
                        'page': page,
                        'page_count': response.meta['page_count'],
                        'Cookie': cookie_str,
                    },
                    headers=post_headers,
                    cookies=cookie_dict,
                )

    def fake_data(self, response, page):
        """Build the ASP.NET postback form for the requested *page*.

        Collects every hidden input (__VIEWSTATE, __EVENTVALIDATION, ...) and
        sets __EVENTARGUMENT to the target page number.
        """
        form_data = {}
        for field in response.xpath('//input[@type="hidden"]'):
            name = field.xpath('./@name').extract_first()
            # Hidden inputs carry their value in the @value attribute, not in
            # text() — the original always posted "" for __VIEWSTATE et al.
            form_data[name] = field.xpath('./@value').extract_first() or ""
        form_data['__EVENTARGUMENT'] = str(page)
        return form_data

    def handle_item_response(self, response):
        """Detail page: assemble and yield the final announcement item."""
        item = XuZhouShiGongGongZiYuanJianSheGongChengJiaoYiPingTaiItem()
        # The page embeds the publish date as "信息时间： YYYY/MM/DD".
        year, month, day = map(int, re.findall(
            r'<font color="#888888" class="webfont">【信息时间：\s+(\d*)/(\d*)/(\d*)\s+&nbsp;&nbsp;阅读次数',
            response.text)[0])
        item['release_time'] = datetime(year, month, day).strftime('%Y-%m-%d')
        item['construction_type'] = response.request.meta['construction_type']
        item['origin_url'] = response.request.meta['origin_url']
        item['announcement_title'] = response.request.meta['announcement_title']
        # 'detail_info' from the listing is deliberately not copied (no content).
        item['html'] = response.text
        item['source_type'] = self.name_zh
        item['is_parsed'] = 0
        item['province'] = self.province
        item['city'] = self.city
        yield item
