# -*- coding: utf-8 -*-
import re
import json
import scrapy
from spidertools.utils.time_utils import get_current_date
from commonresources.spider_items.base_item import convert_dict
from commonresources.spider_items.sichuan.items import SiChuanShengGongGongZiYuanJiaoYiXinXiWangItem
from commonresources.spiders.basespider import BaseSpider
from pprint import pprint
import requests
import datetime


class SiChuanShengGongGongZiYuanJiaoYiXinXiWangSpider(BaseSpider):
    """
        四川省公共资源交易信息网     http://ggzyjy.sc.gov.cn/
    """

    def parse(self, response):
        """No-op default callback: every request this spider issues routes
        its response to handle_response instead."""

    name = 'SiChuanShengGongGongZiYuanJiaoYiXinXiWang'  # scrapy spider id (pinyin of the site name)
    name_zh = "四川省公共资源交易信息网"  # human-readable site name (Chinese)
    province = "四川"  # province this spider covers
    allowed_domains = ['ggzyjy.sc.gov.cn']  # restrict crawling to the official site

    # start_url = 'http://ggzyjy.sc.gov.cn/inteligentsearch/rest/inteligentSearch/getFullTextData'

    def __init__(self, full_dose=True):
        """Initialize the spider.

        full_dose presumably switches full vs. incremental crawling and is
        handled by BaseSpider — TODO confirm against the base class.
        """
        super().__init__(full_dose)
        # Conversion mapping shared via commonresources; attached for later
        # item processing — NOTE(review): usage not visible in this chunk.
        self.convert_dict = convert_dict

    @property
    def fake_headers(self):
        """Browser-like HTTP headers (desktop Edge user-agent string)."""
        user_agent = (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
            " Chrome/86.0.4240.111 Safari/537.36 Edg/86.0.622.58"
        )
        return {"User-Agent": user_agent}

    def start_requests(self):
        """Seed POST requests against the site's full-text search API.

        Queries two category codes (tender notices / award notices) across
        page offsets 0..108 in steps of 12 (the API's page size), limited to
        records whose webdate lies between yesterday 00:00:00 and today
        23:59:59. Each response is handled by handle_response.
        """
        url = 'http://ggzyjy.sc.gov.cn/inteligentsearch/rest/inteligentSearch/getFullTextData'
        # Tender-notice and award-notice category codes (see handle_response).
        category_codes = ['002001001', '002001008']
        # The date window is loop-invariant — compute it once, not per page.
        today = str(datetime.date.today())
        yesterday = str(datetime.date.today() + datetime.timedelta(-1))
        for offset in range(0, 120, 12):
            for code in category_codes:
                # Build the payload as a dict and serialize with json.dumps
                # instead of hand-concatenating a JSON string (the original
                # was fragile around quoting/escaping).
                payload = {
                    "token": "",
                    "pn": offset,
                    "rn": 12,
                    "sdt": "",
                    "edt": "",
                    "wd": "",
                    "inc_wd": "",
                    "exc_wd": "",
                    "fields": "title",
                    "cnum": "",
                    # The API expects this literal string value, not a nested object.
                    "sort": "{'webdate':'0'}",
                    "ssort": "title",
                    "cl": 500,
                    "terminal": "",
                    "condition": [{
                        "fieldName": "categorynum",
                        "equal": code,
                        "notEqual": None,
                        "equalList": None,
                        "notEqualList": None,
                        "isLike": True,
                        "likeType": 2,
                    }],
                    "time": [{
                        "fieldName": "webdate",
                        "startTime": yesterday + " 00:00:00",
                        "endTime": today + " 23:59:59",
                    }],
                    "highlights": "",
                    "statistics": None,
                    "unionCondition": None,
                    "accuracy": "",
                    "noParticiple": "0",
                    "searchRange": None,
                    "isBusiness": "1",
                }
                yield scrapy.FormRequest(url=url, callback=self.handle_response,
                                         dont_filter=True, meta={"need_break": False},
                                         body=json.dumps(payload))

    def handle_response(self, response):
        """Parse a search-API JSON response and request each detail page.

        For every record, builds request meta carrying the announcement
        title, release date, type and an empty item container, then yields a
        request to the detail page handled by parse_item_new.
        """
        records = response.json()['result']["records"]
        for record in records:
            meta = {
                "announcement_title": record['title'],
                # webdate begins with "YYYY-MM-DD"; keep only the date part.
                "release_time": record["webdate"][:10],
                "origin_url": 'http://ggzyjy.sc.gov.cn' + record['linkurl'],
                "item": SiChuanShengGongGongZiYuanJiaoYiXinXiWangItem(),
            }
            # The category code embedded in the URL identifies the notice
            # type; a URL carries at most one code, so elif is safe (the
            # original re-tested with a second if and also left a dead
            # `j = 1` that shadowed the records list — both removed).
            if '002001001' in meta["origin_url"]:
                meta["announcement_type"] = '招标公告'
            elif '002001008' in meta["origin_url"]:
                meta["announcement_type"] = '中标公告'
            yield scrapy.Request(url=meta["origin_url"],
                                 callback=self.parse_item_new,
                                 meta=meta)
