import json
import re

import lxml
import scrapy
from lxml import etree
from spidertools.utils.time_utils import get_current_date

from commonresources.spider_items.hebei.items import HeBeiShengZhaoBiaoTouBiaoFuWuPingTaiItem
from commonresources.spiders.basespider import BaseSpider


class HeBeiShengZhaoBiaoTouBiaoFuWuPingTaiSpider(BaseSpider):
    """
        河北省招标投标服务平台     http://www.hebeieb.com/tender/xxgk/list.do#

        Hebei Province Tendering & Bidding Service Platform. The spider
        reads each announcement-category tab from the landing page, then
        pages through the platform's AJAX listing endpoint one category at
        a time. In incremental mode (``full_dose=False``) paging stops for
        a category as soon as an entry dated before today is encountered.
    """
    name = 'HeBeiShengZhaoBiaoTouBiaoFuWuPingTai'
    name_zh = "河北省招标投标服务平台"
    province = "河北"

    start_urls = ['http://www.hebeieb.com/tender/xxgk/list.do#']

    def __init__(self, full_dose=False):
        # full_dose=True re-crawls history; False keeps only today's entries.
        super(HeBeiShengZhaoBiaoTouBiaoFuWuPingTaiSpider, self).__init__(full_dose)

    def fake_data(self, page=0):
        """Build the POST payload for the paged listing endpoint.

        :param page: zero-based page index to request.
        :return: form-field dict; the date window spans 2000-01-01 through
                 today so the server applies no date filtering of its own.
        """
        return {
            "page": f"{page}",
            "TimeStr": f"2000-01-01,{get_current_date()}",
            "allDq": "reset2",  # site's "all districts" sentinel value
            "allHy": "reset1",  # site's "all industries" sentinel value
            "AllPtName": "",
            "KeyStr": "",
            "KeyType": "ggname",
        }

    def parse(self, response):
        """Entry point: capture the session cookie and fan out one paged
        listing request per announcement-category tab on the landing page."""
        # Decode the header bytes instead of regex-matching their repr();
        # guard the lookup so a missing cookie fails loudly but cleanly
        # rather than with an IndexError.
        set_cookie = response.headers.get('Set-Cookie', b'').decode('utf-8', 'ignore')
        session_match = re.search(r"(JSESSIONID.*?;)", set_cookie)
        if session_match is None:
            # The AJAX listing endpoint rejects requests without a session.
            self.logger.error("no JSESSIONID in Set-Cookie header: %r", set_cookie)
            return
        jsessionid = session_match.group(1)

        for options_obj in response.xpath('//ul[@id="rh_tabs_title"]/li'):
            construction_id = options_obj.xpath('./a/@id').extract_first()
            construction_type = options_obj.xpath('./a/text()').extract_first()
            yield scrapy.FormRequest(
                url=f"http://www.hebeieb.com/tender/xxgk/{construction_id}.do",
                headers=self.fake_headers(jsessionid),
                callback=self.handle_response,
                formdata=self.fake_data(),
                meta={
                    "page": 0,
                    "construction_type": construction_type,
                    "need_break": False,
                    "jsessionid": jsessionid,
                }
            )

    def fake_headers(self, jsessionid):
        """Return browser-like headers for the AJAX listing endpoint.

        :param jsessionid: session cookie fragment captured in :meth:`parse`
                           (currently unused — the Cookie header is disabled).
        """
        return {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Connection": "keep-alive",
            # "Content-Length": "81",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            # "Cookie": f"{jsessionid} __51cke__=; __tins__19687679=%7B%22sid%22%3A%201605163970640%2C%20%22vd%22%3A%202%2C%20%22expires%22%3A%201605165785701%7D; __51laig__=2",
            "Host": "www.hebeieb.com",
            "Origin": "http://www.hebeieb.com",
            # "Referer": "http://www.hebeieb.com/tender/xxgk/list.do?selectype=zbgg",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest",
        }

    def handle_response(self, response):
        """Parse one listing page: yield a detail request per fresh entry,
        then queue the next page unless a stale entry set ``need_break``."""
        # Guard clause: no "publicont" marker means an empty category or
        # that we have paged past the last result.
        if "publicont" not in response.text:
            self.logger.info("listing exhausted: %s", response.url)
            return

        # Entry groups: (detail href, title, release date, [city-area],
        # [source platform], [announcement type], [industry]).
        div_lists = re.findall(
            r'<div class="publicont">\s+<div>\s+<h4>\s+<a href="(.*?)" title="(.*?)" target="_blank">.*?</a>\s+<span class="span_o">(.*?)</span>\s+</h4>\s+<p class="p_tw">\s+<span>所属地市：</span>\s+<span class="span_on">(.*?)</span>\s+<span>来源平台：</span>\s+<span class="span_on">(.*?)</span>\s+<span>信息类别：</span>\s+<span class="span_on">(.*?)</span>\s+<span>行业：</span>\s+<span class="span_on">(.*?)</span>\s+</p>\s+</div>\s+</div>',
            response.text)

        today = get_current_date()  # hoisted: loop-invariant
        for div in div_lists:
            release_time = div[2].strip()
            if not self.full_dose and release_time != today:
                # Incremental run hit an entry older than today: finish this
                # page but do not request the next one.
                response.meta['need_break'] = True
                continue

            # Guarded id extraction: one malformed link skips that entry
            # instead of aborting the whole page with IndexError.
            id_match = re.search(r"categoryid=(.*?)&infoid=(.*?)&", div[0])
            if id_match is None:
                self.logger.warning("unrecognised detail link: %s", div[0])
                continue
            categoryid, infoid = id_match.groups()

            item = {
                'release_time': release_time,
                'announcement_title': div[1].strip(),
                'origin_url': f"http://www.hebeieb.com/infogk/newDetail.do?categoryid={categoryid}&infoid={infoid}&jypt=jypt",
                'city': div[3].split('-')[0].strip('['),
                'project_area': div[3].split('-')[-1].strip(']'),
                'source_platform': div[4].strip(']').strip('['),
                'announcement_type': div[5].strip(']').strip('['),
                'info_type': div[6].strip('[').strip(']'),
                'item': HeBeiShengZhaoBiaoTouBiaoFuWuPingTaiItem(),
            }
            yield scrapy.Request(url=item['origin_url'],
                                 callback=self.parse_item,
                                 meta=item,
                                 dont_filter=True,
                                 )

        if not response.meta['need_break']:
            page = response.meta['page'] + 1
            yield scrapy.FormRequest(
                url=response.url,
                headers=self.fake_headers(response.meta['jsessionid']),
                callback=self.handle_response,
                formdata=self.fake_data(page),
                dont_filter=True,
                meta={
                    "page": page,
                    "construction_type": response.meta["construction_type"],
                    "need_break": False,
                    "jsessionid": response.meta['jsessionid'],
                }
            )
