import scrapy
from commonresources.spiders.basespider import BaseSpider
import time
import re
from commonresources.spider_items.shanxi.items import JinChanDianZiZhaoTouBiaoZongHeJiaoYiPingTaiItem
from commonresources.inner_utils.standardize_field_utils import check_city_field
from commonresources.spider_items.base_item import convert_dict
from spidertools.utils.time_utils import get_current_date


class JinChanDianZiZhaoTouBiaoZongHeJiaoYiPingTai(BaseSpider):
    """
    金蝉电子招投标综合交易平台 (JinChan electronic bidding platform) spider.

    For each of the four announcement types it pages through the listing
    endpoint, follows every listing entry to its detail page, resolves the
    embedded iframe to the backing JSON API URL, and yields one item per
    announcement.
    """
    name = 'JinChanDianZiZhaoTouBiaoZongHeJiaoYiPingTai'
    name_zh = "金蝉电子招投标综合交易平台"
    province = "山西省"

    def __init__(self, full_dose=False):
        """:param full_dose: when True crawl everything; otherwise only today's entries."""
        super(JinChanDianZiZhaoTouBiaoZongHeJiaoYiPingTai, self).__init__(full_dose)
        # Site announcement-type codes -> Chinese display labels.
        self.announcement_type = {"TENDER": "招标公告",
                                  "PREQUALIFICATION": "资格预审公告",
                                  "Candidate": "中标候选人公示",
                                  "WIN_THE_BID": "中标公告"
                                  }
        self.base_url = "http://www.jcebid.com/announcement/page"

        # Spider-specific field-name -> Chinese-label mapping, merged with the
        # shared convert_dict from base_item.
        self.convert_dict = {"industry": "行业",
                             "sources_of_funding": "资金来源",
                             "announcement_status": "公告状态"}
        self.convert_dict.update(convert_dict)

    def start_requests(self):
        """Issue the first listing request for each announcement type."""
        for announcement in self.announcement_type:
            # NOTE(review): the first request carries page "0" only in meta,
            # not in formdata — presumably the server defaults to page 0;
            # confirm against a live response.
            yield scrapy.FormRequest(url=self.base_url,
                                     formdata={"type": announcement},
                                     meta={"announcement_type": announcement,
                                           "page": "0",
                                           "need_break": False})

    def parse(self, response):
        """Parse a listing page: yield one detail request per entry, then the next page."""
        page = int(response.meta["page"])
        divs = response.xpath("//div[@class='ContentList_conT']/div[@class='list_box']")
        # Total page count is embedded in an inline script as: var total = 'N'.
        # NOTE(review): raises AttributeError if the pattern is missing.
        total_pages = int(re.search(r"var total = '(\d+)'", response.text).group(1))
        for div in divs:
            item = {}
            item["release_time"] = div.xpath("./span[@class='right']/text()").extract_first().replace("发布日期：", "")
            if not self.full_dose and item["release_time"] != get_current_date():
                # Incremental mode: an entry older than today stops pagination
                # (remaining entries in this page are still skipped one by one).
                response.meta["need_break"] = True
            else:
                item["announcement_status"] = div.xpath("./span[@class='titleG']/a[1]/text()").extract_first().replace("[", "").replace("]", "")
                item["announcement_title"] = div.xpath("./span[@class='titleG']/a[2]/@title").extract_first()
                item["origin_url"] = "http://www.jcebid.com" + div.xpath("./span[@class='titleG']/a[2]/@href").extract_first()
                item["city"] = div.xpath("./span[@class='label']/a[1]/text()").extract_first().replace("地区：", "")  # region
                item["industry"] = div.xpath("./span[@class='label']/a[2]/text()").extract_first().replace("行业：", "")  # industry
                item["sources_of_funding"] = div.xpath("./span[@class='label']/a[3]/text()").extract_first().replace("资金来源：", "")  # funding source
                item["announcement_type"] = self.announcement_type[response.meta["announcement_type"]]
                yield scrapy.FormRequest(url=item["origin_url"],
                                         callback=self.handle_response,
                                         meta=item)

        if not response.meta["need_break"]:
            # NOTE(review): `<=` requests one page past `total`; harmless if the
            # site tolerates it, but `<` may be intended — confirm the site's
            # page indexing base before changing.
            if page <= total_pages:
                page += 1
                yield scrapy.FormRequest(url=self.base_url,
                                         callback=self.parse,
                                         formdata={"type": response.meta["announcement_type"],
                                                   "page": str(page)},
                                         meta={"announcement_type": response.meta["announcement_type"],
                                               "page": str(page),
                                               "need_break": False})

    def handle_response(self, response):
        """Resolve the detail page's iframe src into the backing JSON API URL."""
        prefix_url = re.search(r'<iframe src="(.*?)"', response.text).group(1)
        id_ = prefix_url.split("id=")[-1]
        # Cache-busting suffix mimicking the site's own "_" query parameter.
        cache_buster = str(int(time.time() * 100))
        url_ = ""
        if response.meta["announcement_type"] in ["招标公告", "资格预审公告"]:
            url_ = prefix_url.split("/notice/")[0] + "/service/api/notice/find?id=" + id_ + "&_=" + cache_buster
        elif response.meta["announcement_type"] == "中标候选人公示":
            url_ = prefix_url.split("com/")[0] + "com/service/api/candidatePublicity/findById?id=" + id_ + "&_=" + cache_buster
        elif response.meta["announcement_type"] == "中标公告":
            if "zonghe" in prefix_url:
                url_ = prefix_url.split("zonghe/")[0] + "zonghe/service/api/winBidNotice/findById?id=" + id_ + "&_=" + cache_buster
            else:
                # NOTE(review): double slash "com//service" reproduced from the
                # working crawl — presumably the server normalizes it; confirm.
                url_ = prefix_url.split("com/")[0] + "com//service/api/winBidNotice/findById?id=" + id_ + "&_=" + cache_buster
        response.meta["item"] = JinChanDianZiZhaoTouBiaoZongHeJiaoYiPingTaiItem()
        yield scrapy.Request(url=url_,
                             callback=self.parse_item_new,
                             meta=response.meta)

    def parse_item_new(self, response):
        """
        Copy transport meta fields into the item and finalize it.

        Usage requirements (translated from the shared template):
        1) response.meta['item'] carries the item object to fill.
        2) Do not propagate useless information into the final meta.
        3) Built-in meta keys ('depth'/'download_timeout'/'download_slot'/
           'download_latency'/'proxy') are ignored.
        4) For pdf/jpg content, set is_pdf=1 / is_jpg=1 in the meta of the
           request one level up.
        5) For multi-level content, store the previous level's page in `html`.
        """
        item = response.meta['item']
        for obj in response.meta:
            try:
                if obj not in ['depth', 'download_timeout', 'download_slot', 'download_latency', 'proxy', 'retry_times',
                               'item', "is_pdf", "is_jpg"]:
                    item[obj] = response.meta[obj]
            except Exception as e:
                # Append to the log so earlier entries survive (was "w+",
                # which truncated the log on every unknown field).
                with open('./import.log', "a", encoding='utf-8') as f:
                    f.write(obj + str(e))
                print(f"这是新的字段:{e},请予以关注，添加或者忽略")
        if "is_pdf" in response.meta:
            # NOTE(review): only bodies above ~16MB are stored as pdf content;
            # this looks inverted but is reproduced from the shared template —
            # confirm the intended threshold direction.
            if len(str(response.body)) > 16000000:
                item['is_pdf'] = 1
                item['pdf_content'] = response.body
        elif "is_jpg" in response.meta:
            item['is_jpg'] = 1
            # Fix: scrapy Response exposes .body, not .content (requests API);
            # the original would raise AttributeError here.
            item['jpg_content'] = response.body
        else:
            item['html'] = response.text
        if "is_parsed" not in item:
            item['is_parsed'] = 0
        item['source_type'] = self.name_zh
        item['province'] = self.province
        if 'project_city' in item:
            item['project_city'] = check_city_field(item['project_city'])
            if not item['project_city']:
                del item['project_city']
        yield item

    @property
    def header_(self):
        """Default desktop Chrome User-Agent header used for requests."""
        return {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36"}