import scrapy
from spidertools.utils.time_utils import get_current_date
from commonresources.spider_items.shandong.items import ShanDongShengJiaoTongJianSheShiChangJianGuanGongGongFuWuPingTaiItem
from commonresources.spiders.basespider import BaseSpider
from commonresources.spider_items.base_item import convert_dict
class ShanDongShengJiaoTongJianSheShiChangJianGuanGongGongFuWuPingTaiSpider(BaseSpider):
    """
    Shandong Provincial Transportation Construction Market Supervision
    Public Service Platform (山东省交通建设市场监管公共服务平台).

    Crawls the platform's paginated JSON list endpoints for tender
    announcements, bid-evaluation publicity, winning-bid announcements and
    bidding-behaviour penalty notices, then follows every record to its
    detail page.
    """
    name = 'ShanDongShengJiaoTongJianSheShiChangJianGuanGongGongFuWuPingTai'
    name_zh = "山东省交通建设市场监管公共服务平台"
    province = "山东省"
    allowed_urls = ['http://60.208.61.132:8083/jsp/shouye/ztb/ztbxx.jsp']
    start_urls = ["http://60.208.61.132:8083/jsp/shouye/ztb/ztbxx.jsp"]

    def __init__(self, full_dose=False):
        """
        :param full_dose: True -> crawl the full history; False -> incremental
                          run that only keeps records published today.
        """
        super(ShanDongShengJiaoTongJianSheShiChangJianGuanGongGongFuWuPingTaiSpider, self).__init__(full_dose)
        # Item-field -> Chinese label mapping, applied when the item is stored.
        self.convert_dict = {
            "bid_winner": "中标单位",
            "section_num": "标段编号",
            "section_name": "标段名称",
            "bid_kind": "工程类别",
            "Project_owner": "招标人信息名称"
        }
        self.convert_dict.update(convert_dict)  # merge in the shared base mapping
        # List-data endpoints keyed by announcement type; keys must match the
        # navigation link texts scraped in parse(), so keep them in Chinese.
        self.urls = {"招标公告": "http://60.208.61.132:8083/jsp/shouye/ztb/zbggshujunew.jsp",
                     "评标结果公示": "http://60.208.61.132:8083/jsp/shouye/ztb/pbggshuju.jsp",
                     "中标公告": "http://60.208.61.132:8083/jsp/shouye/ztb/zbjgshujunew.jsp",
                     "投标行为处理": "http://60.208.61.132:8083/jsp/shouye/ztb/tbxwshuju.jsp"}

    def parse(self, response):
        """Dispatch a first-page list request for every supported announcement type."""
        for link in response.xpath("//div[@id='erji-left-link']/ul/li/a"):
            announcement_type = link.xpath("./text()").extract_first().strip()
            if announcement_type not in self.urls:
                continue
            yield scrapy.FormRequest(url=self.urls[announcement_type],
                                     callback=self.handle_response,
                                     headers=self.fake_headers,
                                     dont_filter=True,
                                     formdata={"page": "1",
                                               "lows": "10"},
                                     meta={"page": 1,
                                           "announcement_type": announcement_type,
                                           "need_break": False})

    def handle_response(self, response):
        """
        Parse one JSON list page: emit a detail-page request per record and,
        unless an out-of-date record flagged the incremental stop, schedule
        the next page of the same endpoint.
        """
        announcement_type = response.meta["announcement_type"]
        for row in response.json()["rows"]:
            item = dict()
            # The publish-time field name differs per endpoint; take the first
            # candidate that is present and non-empty.
            time_list = list(filter(lambda x: row.get(x, ""), ["appro_time", "Pub_time", "sj"]))
            item["release_time"] = row[time_list[0]].split(" ")[0] if time_list else ""  # announcement publish date
            if not self.full_dose and item["release_time"] != get_current_date():
                # Incremental mode: an old record means pagination can stop
                # after this page (remaining rows are still inspected).
                response.meta["need_break"] = True
                continue

            item["announcement_type"] = announcement_type  # announcement category
            if row.get("bid_kind"):
                item["bid_kind"] = row.get("bid_kind")  # works/tender category

            item["announcement_title"] = row.get("notice_name", row.get("bid_name"))  # announcement title
            notice_type = row.get("notice_type", "")  # numeric code of the notice type
            # "true_name" holds the project owner on notice/evaluation pages
            # but the winning bidder on award pages.
            if announcement_type in ("招标公告", "评标结果公示"):
                if row.get("true_name"):
                    item["Project_owner"] = row.get("true_name")  # project owner
            elif announcement_type == "中标公告":
                if row.get("true_name"):
                    item["bid_winner"] = row.get("true_name")  # winning bidder

            # Identifiers used to build the detail-page URL below.
            id1 = row.get("ID", "")
            pro_track_id_zbcq = row.get("PRO_TRACK_ID_zbcq", "")
            proId_zbcq = row.get("proId_zbcq", "")
            qaid = row.get("qaid", "")
            pro_track_id = row.get("PRO_TRACK_ID", "")

            section_list = list(filter(lambda x: row.get(x, ""), ["section", "bidNum1"]))
            if section_list:
                # Value is "标段编号/标段名称"; partition on the first "/" so a
                # slash inside the section name cannot raise ValueError
                # (str.split("/") unpacked into two names did).
                item["section_num"], _, item["section_name"] = row[section_list[0]].partition("/")

            # NOTE(review): the "招标澄清"/"招标答疑" branches are currently
            # unreachable because self.urls does not schedule those types;
            # kept for when those endpoints are enabled.
            if announcement_type == "招标公告":
                item["origin_url"] = f'http://60.208.61.132:8085/ztbSub/SubNoticeServlet?option=showNoticeInfo&id={id1}&preview_type={notice_type}'
            elif announcement_type == "招标澄清":
                item["origin_url"] = f'http://60.208.61.132:8085/ztbSub/SubAboutNoticeServlet?option=showAboutNoticeInfo&id={proId_zbcq}&proTrackId={pro_track_id_zbcq}&notice_type={notice_type}'
            elif announcement_type == "招标答疑":
                item["origin_url"] = f'http://60.208.61.132:8085/ztbSub/SubAboutQuestionServlet?option=showNoticeInfo&id={qaid}'
            elif announcement_type == "评标结果公示":
                item["origin_url"] = f'http://60.208.61.132:8085/ztbSub/SubNoticeServlet?option=showNoticeInfo&id={id1}&preview_type={notice_type}'
            elif announcement_type == "中标公告":
                item["origin_url"] = f'http://60.208.61.132:8085/ztbSub/SubWinnerServlet?option=preview&id={pro_track_id}&bidKindOption=&page=1'
            elif announcement_type == "投标行为处理":
                item["origin_url"] = f'http://60.208.61.132:8085/ztbSub/ComEvalServlet?option=toOnePage&proTrackId={pro_track_id}&sectionId={id1}&preOrAfter=1&page=1&search=&statusRadio=1'

            item["item"] = ShanDongShengJiaoTongJianSheShiChangJianGuanGongGongFuWuPingTaiItem()
            yield scrapy.Request(url=item["origin_url"],
                                 headers=self.fake_headers,
                                 callback=self.parse_item_new,
                                 dont_filter=True,
                                 meta=item)

        if not response.meta["need_break"]:
            # Ceiling division; the previous "total // 10 + 1" requested one
            # empty extra page whenever total was an exact multiple of 10.
            total_page = -(-response.json()["total"] // 10)
            page = response.meta["page"] + 1
            if page <= total_page:
                yield scrapy.FormRequest(url=response.url,
                                         callback=self.handle_response,
                                         headers=self.fake_headers,
                                         dont_filter=True,
                                         formdata={"page": f"{page}",
                                                   "lows": "10"},
                                         meta={"page": page,
                                               "announcement_type": announcement_type,
                                               "need_break": False})

    @property
    def fake_headers(self):
        """Browser-like headers sent with every request to avoid trivial UA blocking."""
        return {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3823.400 QQBrowser/10.7.4307.400"
        }
