# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request, FormRequest
from commonresources.spider_items.jiangsu.suzhou.items import SuZhouZhengFuCaiGouWangItem
from spidertools.common_pipeline.base_item import convert_dict
from spidertools.utils.time_utils import get_current_date

import json
import requests


class SuZhouZhengFuCaiGouWangSpider(scrapy.Spider):
    '''
    Spider for the Suzhou Municipal Government Procurement site.
    苏州市 政府采购网 http://czju.suzhou.gov.cn/zfcg/html/main/index.shtml

    Fetches session cookies from the HTML search page, then walks the
    paginated JSON search endpoint (newest first) and follows each row to
    its detail page, yielding one item per procurement project.
    '''
    name = 'SuZhouZhengFuCaiGouWang'
    name_zh = '苏州政府采购网'
    province = "江苏"
    city = '苏州'
    allowed_domains = ['czju.suzhou.gov.cn']
    start_urls = ['http://czju.suzhou.gov.cn/']

    # POST endpoint that serves the paginated search results as JSON.
    search_url = "http://czju.suzhou.gov.cn/zfcg/content/searchContents.action"
    # Rows requested per page; must stay in sync with the "rows" form field.
    page_size = 30
    # Site's CATEGORY_TYPE code -> human-readable category string.
    _CATEGORY_MAP = {"A": "货物类", "B": "工程类", "C": "服务类"}

    def __init__(self, full_dose=False, *args, **kwargs):
        '''
        :param full_dose: whether to crawl the full history; defaults to False,
            meaning only items published on the current date are kept.
            Scrapy passes ``-a full_dose=...`` as a *string*, so string values
            are coerced: only "true"/"1"/"yes" (case-insensitive) enable it —
            previously any non-empty string (even "false") was truthy.
        '''
        super().__init__(*args, **kwargs)
        self.browser_cookie = {}
        self.page_count = -1  # -1 = total page count not known yet
        self.convert_dict = convert_dict
        if isinstance(full_dose, str):
            full_dose = full_dose.strip().lower() in ("true", "1", "yes")
        self.full_dose = full_dose

    def start_requests(self):
        # Hit the HTML search page first purely to obtain session cookies,
        # then POST to the JSON endpoint with those cookies attached.
        url = ("http://czju.suzhou.gov.cn/zfcg/html/search.shtml"
               "?type=&title=&choose=&projectType=0&zbCode=&appcode=")
        fake_headers = self.fake_head()
        resp = requests.get(url, headers=fake_headers)
        self.browser_cookie = requests.utils.dict_from_cookiejar(resp.cookies)

        page = 1
        yield FormRequest(self.search_url, formdata=self.fake_formdata(page),
                          headers=fake_headers, cookies=self.browser_cookie,
                          callback=self.parse, meta={"page": page})

    def close(self, spider):
        # Nothing to clean up; kept to satisfy the Spider close hook.
        pass

    def check_if_need_break(self, item_day, full_dose):
        '''
        Return True when pagination should stop at this item.

        In incremental mode (``full_dose`` falsy) only items published today
        are wanted, so the first item dated any other day ends the crawl
        (results are assumed newest-first).

        :param item_day: item publication date string, e.g. "2020-09-01"
        :param full_dose: True for a full crawl (never break)
        '''
        return (not full_dose) and get_current_date() != item_day

    def parse(self, response):
        '''
        Parse one JSON page of search results.

        Yields a detail-page Request per row, and — unless an out-of-date
        item ended the incremental crawl — a FormRequest for the next page.
        '''
        page = int(response.meta['page'])
        json_obj = json.loads(response.text)

        need_break = False

        if 'rows' in json_obj:
            item_url = 'http://czju.suzhou.gov.cn/zfcg/html/project/%s.shtml'
            fake_headers = self.fake_head()
            for item in json_obj['rows']:
                title = item['TITLE']
                release_time = item["RELEASE_TIME"]
                if release_time:
                    # RELEASE_TIME looks like "YYYY-MM-DD hh:mm:ss";
                    # compare on the date part only.
                    item_day = release_time.split(" ", 1)[0]
                    need_break = self.check_if_need_break(item_day, self.full_dose)

                if need_break:
                    break

                projectid = item['PROJECTID']
                project_area = item['AREA']
                belong_to = "部省属单位" if item['IS_SG'] == "1" else ""
                category_str = self._CATEGORY_MAP.get(item["CATEGORY_TYPE"], "")

                yield Request(item_url % projectid, callback=self.parse_html,
                              cookies=self.browser_cookie, headers=fake_headers,
                              meta={"title": title,
                                    'release_time': release_time,
                                    'project_area': project_area,
                                    'belong_to': belong_to,
                                    "category_type": category_str})

        if not need_break:
            if self.page_count == -1 and 'total' in json_obj:
                # Ceiling division. The previous int(total/30)+1 requested one
                # extra, empty page whenever total was a multiple of 30.
                total = int(json_obj['total'])
                self.page_count = -(-total // self.page_size)

            if self.page_count != -1 and page < self.page_count:
                page += 1
                yield FormRequest(self.search_url,
                                  formdata=self.fake_formdata(page),
                                  headers=self.fake_head(),
                                  cookies=self.browser_cookie,
                                  callback=self.parse, meta={"page": page})

    def fake_formdata(self, page):
        '''
        Build the POST form data for the search endpoint.

        :param page: 1-based page number to request
        :return: dict of form fields, all values as strings
        '''
        return {
            "title": "",
            "choose": "",
            "type": "0",
            "zbCode": "",
            "appcode": "",
            "page": str(page),
            "rows": str(self.page_size),
        }

    def fake_head(self):
        '''Browser-like request headers so the site serves the AJAX JSON.'''
        return {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Content-Type": "application/x-www-form-urlencoded;",
            "charset": "UTF-8",
            "Host": "czju.suzhou.gov.cn",
            "Origin": "http://czju.suzhou.gov.cn",
            "Proxy-Connection": "keep-alive",
            "Referer": "http://czju.suzhou.gov.cn/zfcg/html/search.shtml?type=&title=&choose=&projectType=0&zbCode=&appcode=",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest"
        }

    def parse_html(self, response):
        '''
        Build the item from a project detail page, combining the page itself
        with the metadata carried over from the search-result row via meta.
        '''
        item = SuZhouZhengFuCaiGouWangItem()
        item["announcement_title"] = response.meta['title']
        item["release_time"] = response.meta["release_time"]
        item['project_area'] = response.meta['project_area']
        item['belong_to'] = response.meta['belong_to']
        # NOTE(review): only the first 40 characters of the page HTML are
        # stored — preserved as-is, but confirm downstream parsing does not
        # expect the full document.
        item['html'] = response.text[0:40]
        item['origin_url'] = response.url
        item["construction_type"] = response.meta["category_type"]
        item['is_parsed'] = 0
        item['source_type'] = self.name_zh
        item['province'] = self.province
        item['city'] = self.city

        yield item
