import time
import re
import scrapy
from scrapy.shell import inspect_response
import requests
from Hue.basepro import ZhengFuBaseSpider

# Matches the JavaScript ``initPubProperty(...)`` call embedded in the search
# page HTML, up to the first occurrence of "attrs"; the per-keyword wordToken
# is single-quoted inside that snippet (extracted in edit_data()).
# re.S lets ``.`` span newlines, since the JS call may wrap across lines.
# NOTE: the pattern must be a raw string — ``\(`` in a normal string is an
# invalid escape sequence (SyntaxWarning on Python 3.12+).
token_rex = re.compile(
    pattern=r"initPubProperty\(.*?attrs",
    flags=re.S,
    )


class ShenyangSpider(ZhengFuBaseSpider):
    """POST-based search spider for the Shenyang municipal government site.

    The search backend (api.so-gov.cn) requires a short-lived ``wordToken``
    that is embedded in the HTML of the regular search page, so each
    (keyword, page) request first fetches that page and scrapes the token
    out of an ``initPubProperty(...)`` JavaScript call before POSTing the
    actual query.
    """
    name = 'Shenyang'
    allowed_domains = ['shenyang.gov.cn', 'so-gov.cn']
    start_urls = ['http://www.shenyang.gov.cn/']
    # JSON search endpoint (POST target).
    api = 'https://api.so-gov.cn/s'
    # HTML search page that embeds the wordToken for a given keyword.
    token_api = 'http://www.shenyang.gov.cn/so/s?qt={keyword}&siteCode=2101000053&tab=all&toolsStatus=1'
    keywords = ['煤炭']
    method = "POST"
    # Reserved for per-keyword token reuse; not read anywhere visible here —
    # presumably consumed by the base class. TODO confirm before removing.
    token_cache = {}
    # POST form template; the {...} placeholder values are filled per request
    # by edit_data().
    data = {
        "siteCode": "2101000053",
        "tab": "all",
        "timestamp": "{timestamp}",
        "wordToken": "{wordtoken}",
        "page": "{page}",
        "pageSize": "20",
        "qt": "{keyword}",
        "timeOption": "0",
        "sort": "relevance",
        "keyPlace": "0",
        "fileType": "",
    }
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:94.0) Gecko/20100101 Firefox/94.0",
        "Accept": "*/*",
        "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
        "Accept-Encoding": "gzip, deflate, br",
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "suid": "404",
        "Origin": "http://www.shenyang.gov.cn",
        "Connection": "keep-alive",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "no-cors",
        "Sec-Fetch-Site": "cross-site",
        "Pragma": "no-cache",
        "Cache-Control": "no-cache"
            }
    parse_first = False

    def edit_data(self, data, keyword, page):
        """Fill the POST form in place for one (keyword, page) request.

        Fetches the HTML search page to scrape the per-keyword ``wordToken``,
        then stamps the form with the token, keyword, page number and a
        millisecond timestamp.

        Raises:
            ValueError: if no ``initPubProperty(...)`` snippet is found in
                the page (layout change or blocked request).
        """
        self.logger.info("Get wordToken of {}".format(keyword))
        token_resp = requests.get(
            self.token_api.format(keyword=keyword),
            headers={"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:94.0) Gecko/20100101 Firefox/94.0"},
        )
        self.logger.info(token_resp.cookies)
        match = token_rex.search(token_resp.text)
        if match is None:
            # Fail loudly with context instead of an opaque AttributeError
            # on `.group()` of None.
            raise ValueError(
                "wordToken not found in search page for keyword {!r}".format(keyword)
            )
        # The matched snippet ends with something like: ... 'TOKEN', ... attrs
        # -> take the third-from-last whitespace-separated field, then strip
        # the surrounding single quotes.
        token = match.group().split()[-3]
        token = token.split("'")[1]
        data["wordToken"] = token
        data["qt"] = str(keyword)
        data["page"] = str(page)
        # Millisecond epoch timestamp. Integer division is correct regardless
        # of how many digits time_ns() returns; the previous str(...)[:13]
        # slice silently depended on a 19-digit value.
        data["timestamp"] = str(time.time_ns() // 1_000_000)
        return data

    def edit_page(self, response):
        """Return the total number of result pages for this query."""
        raw_data = response.json()
        total_items_num = int(raw_data["data"]["search"]["totalHits"])
        # Ceiling division at 20 items/page, never fewer than one page.
        # (The previous `// 20 + 1` requested an extra empty page whenever
        # the hit count was an exact multiple of 20.)
        return max(1, -(-total_items_num // 20))

    def edit_items_box(self, response):
        """Yield the list of raw result items from one search response."""
        raw_data = response.json()
        yield raw_data["data"]["search"]["searchs"]

    def edit_items(self, items_box):
        """Yield each raw item from a result list, one at a time."""
        for item in items_box:
            yield item
