import re
import time
# from inline_requests import inline_requests

import requests
import scrapy
from fake_useragent import UserAgent
import json

from spidertools.common_pipeline.base_item import convert_dict
from spidertools.utils.time_utils import get_current_date

from commonresources.spider_items.jiangsu.taizhou.items import GongGongZiYuanJiaoYiPingTaiTaiZhouZhanItem


class GongGongZiYuanJiaoYiPingTaiTaiZhouZhanSpider(scrapy.Spider):
    """
    Listing-page spider for the Taizhou public-resource trading platform
    (公共资源交易平台泰州站, http://zwfw.taizhou.gov.cn/ggzy/).

    Flow: ``parse`` reads the total row count from the entry page, grabs an
    anonymous OAuth token pair, then walks the paged JSON listing API
    (``handle_next_page``), issuing one detail request per row; each detail
    page is captured verbatim by ``handle_detail_response``.
    """
    name = 'GongGongZiYuanJiaoYiPingTaiTaiZhouZhan'
    name_zh = '公共资源交易平台泰州站'
    province = "江苏"
    city = '泰州'
    allowed_domains = ['58.222.225.18']
    start_urls = ['http://58.222.225.18:8138/jyxx/2.html']

    # Endpoints and paging constants shared by parse/handle_next_page.
    TOKEN_URL = "http://58.222.225.18:8138/EpointWebBuilder/rest/getOauthInfoAction/getNoUserAccessToken"
    LIST_URL = "http://58.222.225.18:8138/EpointWebBuilder/rest/GgSearchAction/getInfoMationList"
    PAGE_SIZE = 15

    def __init__(self, full_dose=False):
        """
        :param full_dose: crawl every page when truthy; otherwise only rows
            dated today (incremental mode).  Scrapy's ``-a full_dose=...``
            passes a *string*, so common spellings of "false" are normalized.
        """
        super().__init__()
        self.browser_cookie = {}
        self.page_count = -1  # highest 0-based page index; set in parse()
        self.convert_dict = convert_dict
        # BUGFIX: CLI arguments arrive as strings and "False"/"0" are truthy,
        # which silently forced a full crawl.  Normalize string spellings.
        if isinstance(full_dose, str):
            full_dose = full_dose.strip().lower() not in ('', '0', 'false', 'no')
        self.full_dose = bool(full_dose)

    def get_formdata_taizhou(self, i):
        """Build the POST form body for 0-based page *i* of the listing API.

        BUGFIX: the payload used to be built by string concatenation and
        carried a trailing comma (``"pageSize": 15,}``), which is not valid
        JSON; ``json.dumps`` over a dict guarantees a well-formed payload.
        """
        params = {
            "siteGuid": "7eb5f7f1-9041-43ad-8e13-8fcb82ea831a",
            "categoryNum": "001",
            "searchTitle": "",
            "pageIndex": int(i),
            "pageSize": self.PAGE_SIZE,
        }
        return {'params': json.dumps(params, ensure_ascii=False)}

    def get_headers(self, access_token, refresh_token):
        """Return the request headers expected by the Epoint listing API.

        The tokens are injected both as a Bearer ``Authorization`` header and
        inside the ``Cookie`` string, mirroring what the site's own frontend
        sends.
        """
        headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
            "Authorization": f"Bearer {access_token}",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Host": "58.222.225.18:8138",
            "Origin": "http://58.222.225.18:8138",
            "Proxy-Connection": "keep-alive",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36 Edg/86.0.622.38",
            "X-Requested-With": "XMLHttpRequest",
            "Cookie": f"oauthClientId=demoClient; oauthPath=http://192.168.0.20:8082/EpointWebBuilder; oauthLoginUrl=http://127.0.0.1/membercenter/login.html?redirect_uri=;oauthLogoutUrl=; noOauthRefreshToken={refresh_token}; noOauthAccessToken={access_token}"
        }
        return headers

    def _fetch_tokens(self):
        """Fetch a fresh anonymous ``(access_token, refresh_token)`` pair.

        NOTE(review): this performs a blocking ``requests`` call inside a
        scrapy callback (as the original code did), which stalls the reactor
        for the duration of the HTTP round trip.
        """
        custom = requests.post(self.TOKEN_URL).json()['custom']
        return custom['access_token'], custom['refresh_token']

    def parse(self, response):
        """Read the total row count from the entry page, request page 0."""
        total = int(re.search(r'total: (.*?),', response.text).group(1))
        # Highest valid 0-based page index.  BUGFIX: the old
        # ``total // 15 + 1`` over-requested one or two empty trailing pages.
        self.page_count = (total + self.PAGE_SIZE - 1) // self.PAGE_SIZE - 1
        access_token, refresh_token = self._fetch_tokens()
        yield scrapy.FormRequest(
            self.LIST_URL,
            formdata=self.get_formdata_taizhou(0),
            headers=self.get_headers(access_token, refresh_token),
            callback=self.handle_next_page,
            meta={'page': 0, 'need_break': False},
        )

    def handle_next_page(self, response):
        """Yield one detail request per listing row, then page forward.

        In incremental mode (``full_dose`` false) a row dated earlier than
        today flips ``need_break`` so no further listing page is requested;
        remaining rows of the current page are still scanned.
        """
        rows = json.loads(response.text)['Table']
        for row in rows:
            item = dict()
            item['release_time'] = row['infodate']
            self.logger.debug("release_time=%s", item['release_time'])
            if not self.full_dose and item['release_time'] != get_current_date():
                self.logger.debug('stale row seen; stopping pagination after this page')
                response.meta['need_break'] = True
            else:
                item['origin_url'] = "http://58.222.225.18:8138" + row['infourl']
                item['announcement_title'] = row['title']
                # BUGFIX: re.match anchors at position 0, so a title like
                # "xxx【area】" made .group(1) raise AttributeError; re.search
                # finds the bracketed area anywhere in the title.
                area = re.search(r"【(.*?)】", item['announcement_title'])
                item['project_area'] = area.group(1) if area else ""
                yield scrapy.Request(url=item['origin_url'], meta=item,
                                     callback=self.handle_detail_response,
                                     cookies=self.browser_cookie)

        if not response.meta['need_break']:
            page = response.meta['page']
            if page < self.page_count:
                page += 1
                access_token, refresh_token = self._fetch_tokens()
                yield scrapy.FormRequest(
                    self.LIST_URL,
                    formdata=self.get_formdata_taizhou(page),
                    headers=self.get_headers(access_token, refresh_token),
                    callback=self.handle_next_page,
                    meta={'page': page, 'need_break': False},
                )

    def handle_detail_response(self, response):
        """Wrap one detail page (raw HTML plus listing metadata) in an item."""
        item = GongGongZiYuanJiaoYiPingTaiTaiZhouZhanItem()
        item['project_area'] = response.meta['project_area']
        item['release_time'] = response.meta['release_time']
        item['origin_url'] = response.meta['origin_url']
        item['announcement_title'] = response.meta['announcement_title']
        item['html'] = response.text  # full page; parsed downstream
        item['is_parsed'] = 0
        item['source_type'] = self.name_zh
        item['province'] = self.province
        item['city'] = self.city
        yield item
