import time

import scrapy
from spidertools.utils.time_utils import get_current_date

from commonresources.inner_utils.standardize_field_utils import check_city_field, check_time_field
from commonresources.spider_items.base_item import convert_dict
from commonresources.spider_items.guangdong.items import GuangDongShengQuanGuoGongGongZiYuanJiaoYiPingTaiItem
from commonresources.spiders.basespider import BaseSpider


class GuangDongShengQuanGuoGongGongZiYuanJiaoYiPingTaiSpider(BaseSpider):
    """
        广东省全国公共资源交易平台 (Guangdong section of the national public
        resource trading platform).
                Home page:  http://bs.gdggzy.org.cn/osh-web/
                Listings:   tender notices (招标公告):
                            http://bs.gdggzy.org.cn/osh-web/project/projectbulletin/bulletinList?queryType=1&orgCode=gd&tradeTypeId=Construction&tradeItemId=gc_res_bulletin
                            result notices (结果公告):
                            http://bs.gdggzy.org.cn/osh-web/project/projectbulletin/bulletinList?queryType=3&orgCode=gd&tradeTypeId=Construction&tradeItemId=gc_res_result
    """
    name = "GuangDongShengQuanGuoGongGongZiYuanJiaoYiPingTai"
    name_zh = "广东省全国公共资源交易平台"
    province = "广东省"

    start_urls = ["http://bs.gdggzy.org.cn/osh-web/"]

    def __init__(self, full_dose=False):
        super(GuangDongShengQuanGuoGongGongZiYuanJiaoYiPingTaiSpider, self).__init__(full_dose)
        self.convert_dict = convert_dict  # used when converting items for storage

    def parse(self, response):
        """Read the session cookies issued by the landing page, then start one
        paginated listing request per announcement type.
        """
        # queryType -> [announcement-type label, tradeItemId].
        # BUGFIX: result announcements are queried with queryType=3, not 2 --
        # see the URL in the class docstring and the faker_formdata docstring.
        start_conditions = {
            '1': ["招标公告", "gc_res_bulletin"],
            "3": ["中标公告", "gc_res_result"],
        }
        # The listing endpoint requires the JSESSIONID / __jsluid_h cookies
        # handed out by the home page, so parse them out of Set-Cookie here.
        set_cookies = response.headers.getlist('Set-Cookie')
        jsessionid, jsluid = '', ''
        for cookie in set_cookies:
            cookie_parse = cookie.split(b';')[0].decode('utf-8')
            if cookie_parse.startswith("JSESSIONID="):
                jsessionid = cookie_parse[len("JSESSIONID="):]
            elif cookie_parse.startswith("__jsluid_h="):
                jsluid = cookie_parse[len("__jsluid_h="):]
        for condition, value in start_conditions.items():
            url = "http://bs.gdggzy.org.cn/osh-web/project/projectbulletin/bulletinList"
            yield scrapy.FormRequest(
                url=url,
                callback=self.handle_response,
                headers=self.faker_headers(jsessionid, jsluid),
                formdata=self.faker_formdata(condition, 1, value[-1]),
                dont_filter=True,
                meta={
                    "query_num": condition,
                    "announcement_type": value[0],
                    "tradeItemId": value[-1],
                    "page": 1,
                    "need_break": False,
                    "jsessionid": jsessionid,
                    "jsluid": jsluid,
                })

    def handle_response(self, response):
        """Parse one page of the bulletin listing: yield a detail request for
        each data row, then request the next page unless a stop condition
        (already-seen date in incremental mode, or too-old date) was hit.
        """
        objs = response.xpath('//table[@class="table"]//tr')
        jsessionid = response.meta['jsessionid']
        jsluid = response.meta['jsluid']
        for index, obj in enumerate(objs):
            if not index:  # first <tr> is the table header row
                continue
            release_time = check_time_field(obj.xpath('./td[4]/span/text()').extract_first())
            if not self.full_dose and release_time != get_current_date():
                # Incremental mode: listing is assumed newest-first, so a
                # non-today row means everything after it is old -- stop
                # paginating after this page. (response.meta is the same dict
                # as response.request.meta; use one spelling consistently.)
                response.meta['need_break'] = True
            elif release_time < "2015-12-31":
                # Lexicographic compare is valid because dates are zero-padded
                # YYYY-MM-DD strings; nothing older than end of 2015 is wanted.
                response.meta['need_break'] = True
            else:
                href = obj.xpath('./td[2]/a/@href').extract_first()
                if not href:
                    # Defensive: skip malformed rows without a detail link
                    # rather than raising TypeError on the concatenation below.
                    continue
                item = dict()
                item['item'] = GuangDongShengQuanGuoGongGongZiYuanJiaoYiPingTaiItem()
                item['release_time'] = release_time
                item['announcement_type'] = response.meta['announcement_type']
                item['announcement_title'] = obj.xpath('./td[2]/a/text()').extract_first(default='').strip()
                item['origin_url'] = "http://bs.gdggzy.org.cn" + href
                item['project_city'] = check_city_field(obj.xpath('./td[3]/text()').extract_first())
                yield scrapy.Request(
                    url=item['origin_url'],
                    headers=self.faker_headers(jsessionid, jsluid, 1),
                    callback=self.parse_item_new,
                    meta=item,
                    dont_filter=True,
                )

        if not response.meta['need_break']:
            page = response.meta['page'] + 1
            query_num = response.meta['query_num']
            tradeItemId = response.meta['tradeItemId']
            announcement_type = response.meta['announcement_type']
            yield scrapy.FormRequest(
                url=response.url,
                callback=self.handle_response,
                headers=self.faker_headers(jsessionid, jsluid),
                formdata=self.faker_formdata(query_num, page, tradeItemId),
                dont_filter=True,
                meta={
                    "query_num": query_num,
                    "tradeItemId": tradeItemId,
                    "announcement_type": announcement_type,
                    "page": page,
                    "need_break": False,
                    "jsessionid": jsessionid,
                    "jsluid": jsluid,
                })

    def faker_headers(self, jsessionid, jsluid, flag=0):
        """Build browser-like request headers carrying the session cookies.

        jsessionid / jsluid: cookie values captured in parse().
        flag: 0 for the form-POST listing request; non-zero for the plain GET
              detail request (drops the POST-only headers).
        """
        stamp = int(time.time())
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
            "Cache-Control": "max-age=0",
            "Connection": "keep-alive",
            "Content-Type": "application/x-www-form-urlencoded",
            # Fake Baidu-analytics timestamps; the +27s offset just makes the
            # "last visit" look slightly later than the "first visit".
            "Cookie": f"Hm_lvt_e69ca51933e85f436518122b1647992e={stamp}; Hm_lpvt_e69ca51933e85f436518122b1647992e={stamp + 27}; JSESSIONID={jsessionid}; __jsluid_h={jsluid}",
            "Host": "bs.gdggzy.org.cn",
            "Origin": "http://bs.gdggzy.org.cn",
            "Referer": "http://bs.gdggzy.org.cn/osh-web/project/projectbulletin/bulletinList",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36 Edg/87.0.664.66",
        }
        if flag:
            # Detail pages are fetched with GET: no body, no origin needed.
            del headers['Cache-Control']
            del headers['Content-Type']
            del headers['Origin']
        return headers

    def faker_formdata(self, query_num, page_num, tradeItemId):
        """Build the POST form for the bulletin listing endpoint.

        query_num: "1" for tender notices (招标公告); "3" for result
                   notices (中标公告).
        page_num: 1-based page number.
        tradeItemId: listing category id (e.g. "gc_res_bulletin").
        """
        return {
            "orgCode": "",
            "tradeTypeId": "Construction",
            "queryType": f"{query_num}",
            "tradeItemId": f"{tradeItemId}",
            "bulletinName": "",
            "startTime": "",
            "endTime": "",
            "pageNum": f"{page_num}",
        }
