import json
from datetime import datetime

import scrapy
from lxml import etree
from spidertools.common_pipeline.base_item import convert_dict
from spidertools.utils.time_utils import get_current_date

from commonresources.spider_items.jiangsu.zhenjiang.items import ZhenJiangGongGongZiYuanJiaoYiZhongXinItem


class LianYunGangGongGongZiYuanJiaoYiWangSpider(scrapy.Spider):
    """
        连云港公共资源交易网 (Lianyungang Public Resource Trading Network).

        Reuses the item definition of the Zhenjiang public-resource-trading
        spider (ZhenJiangGongGongZiYuanJiaoYiZhongXinItem).
        Entry point: http://spzx.lyg.gov.cn/lygweb/
    """
    name = 'LianYunGangGongGongZiYuanJiaoYiWang'
    name_zh = "连云港公共资源交易网"
    province = "江苏"
    city = '连云港'
    # BUG FIX: every request this spider issues goes to spzx.lyg.gov.cn, but
    # only the Zhenjiang domain was listed, so OffsiteMiddleware would filter
    # the initial request (which is not marked dont_filter).  The original
    # entry is kept for backward compatibility.
    allowed_domains = ['spzx.lyg.gov.cn', 'ggzy.zhenjiang.gov.cn']

    def __init__(self, full_dose=False):
        """
        :param full_dose: whether to crawl the full history instead of only
            announcements released today.  Accepts a bool, or a string such
            as "true"/"false" (``scrapy crawl -a full_dose=...`` passes
            arguments as strings; previously any non-empty string — including
            "false" — was treated as truthy).
        """
        if isinstance(full_dose, str):
            # Coerce CLI string values to a real bool.
            full_dose = full_dose.strip().lower() in ('true', '1', 'yes')
        self.browser_cookie = {}
        self.convert_dict = convert_dict
        self.full_dose = full_dose
        super().__init__()

    def parse(self, response):
        # Required by scrapy.Spider's abstract interface; all real parsing
        # happens in the dedicated callbacks below.
        pass

    def start_requests(self):
        # Plain GET request; the original used FormRequest without form data,
        # which behaves identically but is misleading.
        yield scrapy.Request(
            url="http://spzx.lyg.gov.cn/lygweb/jyxx/tradeInfo.html",
            headers=self.get_headers(),
            callback=self.start_requests2,
        )

    def classify(self, res):
        """Parse the navigation tree of the trade-info page.

        :param res: response of the tradeInfo.html landing page.
        :return: dict mapping construction_type (tree heading text) to a list
            of ``{'info_type': ..., 'target_url': ...}`` entries, one per
            sub-category link.
        """
        trade_info = {}
        html = etree.HTML(res.text)
        # NOTE(review): the original selector matched class
        # 'wb-tree-items.py haschild ' — the '.py' looks like an editing
        # artifact inside an HTML class name.  Try the plausible real class
        # first, then fall back to the original selector so behavior cannot
        # regress if the site really uses the odd class value.
        rows_obj = html.xpath("//li[@class='wb-tree-items haschild ']")
        if not rows_obj:
            rows_obj = html.xpath("//li[@class='wb-tree-items.py haschild ']")
        for rows in rows_obj:
            construction_type = rows.xpath("./h3/span/a/text()")[0]
            trade_info[construction_type] = [
                {
                    'info_type': a.xpath('./text()')[0],
                    'target_url': 'http://spzx.lyg.gov.cn' + a.xpath('./@href')[0],
                }
                for a in rows.xpath("./ul/li/a")
            ]
        return trade_info

    def start_requests2(self, response):
        """Fan out one listing request per (construction_type, info_type)."""
        for construction_type, entries in self.classify(response).items():
            for entry in entries:
                target_url = entry['target_url']
                yield scrapy.Request(
                    url=target_url,
                    headers=self.get_headers(),
                    callback=self.handle_response,
                    meta={
                        'needbreak': False,
                        'construction_type': construction_type,
                        'info_type': entry['info_type'],
                        # Last path component; reused to build pagination URLs.
                        'categoryname': target_url.split('/')[-1],
                        'page': 0,
                        # -1 means "unknown until the first page is parsed".
                        'page_count': -1,
                    },
                    dont_filter=True,
                )

    def handle_response(self, response):
        """Parse one listing page: yield detail requests, then paginate.

        In incremental mode (full_dose False) the crawl of this category stops
        as soon as a row older than today is seen (listing is newest-first).
        """
        meta = response.request.meta
        if meta['page_count'] == -1:
            # Pager widget shows "current/total"; take the total.
            meta['page_count'] = int(
                response.xpath('//li[@id="index"]/text()').extract_first().split('/')[-1]
            )
        for obj in response.xpath('//tr[@class="ewb-trade-tr"]'):
            item = dict()
            item['release_time'] = obj.xpath('./td[last()]/text()').extract_first().strip()
            if not self.full_dose and item['release_time'] != get_current_date():
                # Older row reached: stop paginating this category.
                meta['needbreak'] = True
            else:
                item['announcement_title'] = obj.xpath('./td/a/text()').extract_first()
                item['project_area'] = self.get_area(obj.xpath('./td/span/text()').extract_first())
                item['origin_url'] = 'http://spzx.lyg.gov.cn' + obj.xpath('./td/a/@href').extract_first()
                item['construction_type'] = meta['construction_type']
                item['info_type'] = meta['info_type']
                yield scrapy.Request(
                    url=item['origin_url'],
                    dont_filter=True,
                    headers=self.get_headers(),
                    callback=self.parse_html,
                    meta=item,
                )
        if not meta['needbreak']:
            page_count = meta['page_count']
            page = meta['page']
            if page < page_count:
                page += 1
                # Pagination URL pattern: /lygweb/jyxx/<first 6 chars>/<categoryname><page>.html
                url = (
                    "http://spzx.lyg.gov.cn/lygweb/jyxx/" + meta['categoryname'][0:6] + "/"
                    + meta['categoryname'] + f"{page}.html"
                )
                yield scrapy.Request(
                    url=url,
                    headers=self.get_headers(),
                    callback=self.handle_response,
                    meta={
                        'needbreak': False,
                        'construction_type': meta['construction_type'],
                        'info_type': meta['info_type'],
                        'categoryname': meta['categoryname'],
                        'page': page,
                        'page_count': page_count,
                    },
                    dont_filter=True,
                )

    def get_area(self, area):
        """Map a Lianyungang administrative-division code to its district name.

        :param area: division code as str/int scraped from the listing row;
            may be None or non-numeric for malformed rows.
        :return: district name, or the input unchanged when it is not a known
            code (previously a missing/non-numeric cell raised
            TypeError/ValueError and killed the callback).
        """
        d = {
            320701: "市区",
            320702: "连云区",
            320706: "海州区",
            320708: "经济技术开发区",
            320710: "徐圩新区",
            320721: "赣榆区",
            320722: "东海县",
            320723: "灌云县",
            320724: "灌南县",
        }
        try:
            return d.get(int(area), area)
        except (TypeError, ValueError):
            return area

    def get_headers(self):
        """Return static browser-like request headers for spzx.lyg.gov.cn."""
        return {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
            "Cache-Control": "max-age=0",
            "Connection": "keep-alive",
            "Host": "spzx.lyg.gov.cn",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"
        }

    def parse_html(self, response):
        """Build the final item from an announcement detail page.

        All listing-level fields were carried in ``response.meta`` by
        handle_response; this callback only attaches the raw HTML and the
        spider-level constants.
        """
        item = ZhenJiangGongGongZiYuanJiaoYiZhongXinItem()
        item['html'] = response.text
        item["announcement_title"] = response.meta['announcement_title']
        item["release_time"] = response.meta["release_time"]
        item['project_area'] = response.meta['project_area']
        item['origin_url'] = response.meta['origin_url']
        item["construction_type"] = response.meta['construction_type']
        item['info_type'] = response.meta['info_type']
        # Downstream pipeline flag: raw page not yet parsed into fields.
        item['is_parsed'] = 0
        item['source_type'] = self.name_zh
        item['province'] = self.province
        item['city'] = self.city
        yield item
