# -*- coding: utf-8 -*-
# @Time : 2020/12/2 11:22
# @Author : zhangxing
# @File :
import json
import re

import requests
import scrapy
from spidertools.utils.time_utils import get_current_date

from commonresources.spider_items.base_item import convert_dict
from commonresources.spider_items.shandong.items import ShanDongShengCaiGouYuZhaoBiaoWangItem
from commonresources.spiders.basespider import BaseSpider


class ShanDongShengCaiGouYuZhaoBiaoWangSpider(BaseSpider):
    """
        Shandong Province Procurement and Bidding Network spider.
                Home page:  https://www.sdbidding.org.cn/
                List page:  https://www.sdbidding.org.cn/bulletins
    """
    name = "ShanDongShengCaiGouYuZhaoBiaoWang"
    name_zh = "山东省采购与招标网"
    province = "山东"
    # allowed_domains = ['']
    start_urls = ["https://www.sdbidding.org.cn/"]

    # Page size used by the site's /bulletins listing endpoint.
    PAGE_SIZE = 10
    # Announcements dated strictly before this are not crawled.
    EARLIEST_DATE = "2015-12-31"

    def __init__(self, full_dose=False):
        """
        :param full_dose: True for a full historical crawl, False for an
            incremental crawl that only keeps rows dated today.
        """
        super(ShanDongShengCaiGouYuZhaoBiaoWangSpider, self).__init__(full_dose)
        self.convert_dict = convert_dict  # conversion mapping used when storing items

    def parse(self, response):
        """Capture the session cookie from the home page, then start one
        paginated listing crawl per announcement type."""
        # The listing endpoint requires the session cookie set on the home page.
        cookie = (response.headers['Set-Cookie'].split(b';')[0]).decode('utf-8')
        url = "https://www.sdbidding.org.cn/bulletins"
        # infoType 11 = tender announcements, 12 = award announcements.
        start_configs = [{
            "announcement_type_num": 11,
            "announcement_type": "招标公告"
        }, {
            "announcement_type_num": 12,
            "announcement_type": "中标公告"
        }]
        for config in start_configs:
            announcement_type_num = config['announcement_type_num']
            announcement_type = config['announcement_type']
            yield scrapy.FormRequest(
                url=url,
                callback=self.handle_response,
                headers=self.faker_headers(cookie),
                dont_filter=True,
                formdata=self.faker_formdata(1, announcement_type_num),
                meta={
                    'announcement_type_num': announcement_type_num,
                    'announcement_type': announcement_type,
                    'page_count': -1,  # unknown until the first page is parsed
                    'page': 1,
                    'need_break': False,
                    'cookie': cookie,
                }
            )

    def handle_response(self, response):
        """Parse one listing page: emit a detail request per row, then
        schedule the next page unless a date cutoff was reached."""
        if response.meta['page_count'] == -1:
            # The total record count is embedded in inline javascript on the page.
            total = int(re.findall(r"count: (\d+) ", response.text)[0])
            # Ceiling division. The previous `total // 10 + 1` requested an
            # extra empty page whenever total was an exact multiple of 10.
            response.meta['page_count'] = -(-total // self.PAGE_SIZE)

        for index, row in enumerate(response.xpath('//table//tr')):
            if index == 0:
                continue  # skip the table header row
            item = dict()
            item['release_time'] = row.xpath('./td[@class="time"]/text()').extract_first()
            if not item['release_time']:
                # Malformed row: without a date string, the comparisons below
                # would raise TypeError (None < str) or falsely stop the crawl.
                continue
            if not self.full_dose and item['release_time'] != get_current_date():
                # Incremental mode: rows are assumed newest-first, so the first
                # non-today row means the rest is already stored — TODO confirm
                # the listing is in fact sorted by date descending.
                response.meta['need_break'] = True
            elif item['release_time'] < self.EARLIEST_DATE:
                # Reached announcements older than the crawl horizon.
                response.meta['need_break'] = True
            else:
                item['announcement_title'] = row.xpath('./td[@class="tit"]/a/text()').extract_first()
                item['origin_url'] = "https://www.sdbidding.org.cn" + row.xpath(
                    './td[@class="tit"]/a/@href').extract_first()
                item['project_type'] = row.xpath('./td[@class="type"]/span/text()').extract_first()
                item['announcement_type'] = response.meta['announcement_type']
                item['item'] = ShanDongShengCaiGouYuZhaoBiaoWangItem()
                yield scrapy.Request(url=item['origin_url'],
                                     headers=self.faker_headers(response.meta['cookie']),
                                     callback=self.parse_item_new,
                                     meta=item)

        if not response.meta['need_break']:
            page = response.meta['page']
            page_count = response.meta['page_count']
            # Strict `<`: the current response is page `page`, so the last
            # follow-up request is for page `page_count`. The previous `<=`
            # requested one page past the end.
            if page < page_count:
                page += 1
                yield scrapy.FormRequest(
                    url=response.url,
                    callback=self.handle_response,
                    headers=self.faker_headers(response.meta['cookie']),
                    dont_filter=True,
                    formdata=self.faker_formdata(page, response.meta['announcement_type_num']),
                    meta={
                        'announcement_type_num': response.meta['announcement_type_num'],
                        'announcement_type': response.meta['announcement_type'],
                        'page_count': page_count,
                        'page': page,
                        'need_break': False,
                        'cookie': response.meta['cookie']
                    }
                )

    def faker_headers(self, cookie):
        """Build browser-like request headers carrying the session cookie.

        :param cookie: the `name=value` session cookie captured in parse().
        :return: dict of HTTP headers.
        """
        headers = {
            "Cookie": f"{cookie}",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "max-age=0",
            "Connection": "keep-alive",
            "Content-Type": "application/x-www-form-urlencoded",
            "Host": "www.sdbidding.org.cn",
            "Sec-Fetch-Dest": "document",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "same-origin",
            "Sec-Fetch-User": "?1",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36",
        }
        return headers

    def faker_formdata(self, page, announcement_type_num):
        """Build the POST form for one listing page.

        :param page: 1-based page number.
        :param announcement_type_num: site infoType code (11 or 12).
        :return: dict of form fields (all values as strings, as required by
            scrapy.FormRequest).
        """
        return {
            "titleLike": "",
            "pageNo": f"{page}",
            "pageSize": f"{self.PAGE_SIZE}",
            "infoType": f"{announcement_type_num}",
        }

    def faker_payloaddata(self):
        """Unused hook kept for BaseSpider interface compatibility."""
        pass

    def handle_detail_page(self, response):
        """Unused hook kept for BaseSpider interface compatibility."""
        pass

    def handle_pdf_content(self, response):
        """Multi-hop flow: follow to the PDF page and extract its content.

        Not implemented for this site.
        """
        pass
