import json
import re

import requests
import scrapy
from spidertools.utils.time_utils import get_current_date

from commonresources.spider_items.base_item import convert_dict
from commonresources.spider_items.zhejiang.items import ZheJiangShengGongGongZiYuanJiaoYiPingTaiItem
from commonresources.spiders.basespider import BaseSpider


class ZheJiangShengGongGongZiYuanJiaoYiPingTaiSpider(BaseSpider):
    """
        浙江省公共资源交易平台 (Zhejiang Province Public Resource Trading
        Platform)   http://www.zjpubservice.com/

        The spider posts one query per (city, announcement-category) pair to
        the platform's full-text search API, pages through the JSON result
        list, and follows every record to its detail page.  Detail pages come
        in several variants (summary pages that link out to other hosts, PDF
        attachments, plain HTML); ``handle_detail_page`` and
        ``handle_pdf_content`` dispatch between them before handing the final
        response to ``parse_item`` (not defined in this file — presumably
        provided by ``BaseSpider``; confirm there).
    """
    name = 'ZheJiangShengGongGongZiYuanJiaoYiPingTai'
    name_zh = "浙江省公共资源交易平台"
    province = "浙江"

    def __init__(self, full_dose=False):
        """
        :param full_dose: when truthy, run a full historical crawl with large
            result pages; otherwise run the incremental crawl that only keeps
            records published today (see ``handle_response``).
        """
        # Page size ("rn" in the search payload). Must be set before
        # super().__init__ in case the base class triggers any work that
        # reaches get_payloaddata.
        self.capacity = 250 if full_dose else 12
        super(ZheJiangShengGongGongZiYuanJiaoYiPingTaiSpider, self).__init__(full_dose)
        self.convert_dict = convert_dict

    def parse(self, response):
        # Unused default callback: every request issued by this spider names
        # an explicit callback.
        pass

    def classify(self):
        """Return ``(city, info_type)``.

        ``city``       -- list of {"name", "code"} filters (the "infoc" field
                          of the search API).
        ``info_type``  -- mapping of business category name to the list of
                          announcement sub-categories ("categorynum" /
                          "categoryname") to query.

        Categories other than 建设工程 (construction works) are kept below as
        deliberately disabled configuration, ready to be re-enabled.
        """
        city = [{"name": "杭州地区", "code": "3301"},
                {"name": "宁波地区", "code": "3302"},
                {"name": "温州地区", "code": "3303"},
                {"name": "湖州地区", "code": "3305"},
                {"name": "嘉兴地区", "code": "3304"},
                {"name": "绍兴地区", "code": "3306"},
                {"name": "金华地区", "code": "3307"},
                {"name": "衢州地区", "code": "3308"},
                {"name": "舟山地区", "code": "3309"},
                {"name": "台州地区", "code": "3310"},
                {"name": "丽水地区", "code": "3311"}]

        info_type = {
            "建设工程": [
                {
                    "categorynum": "002001001",
                    "categoryname": "招标公告"
                },
                {
                    "categorynum": "002001002",
                    "categoryname": "资格预审公告"
                },
                {
                    "categorynum": "002001003",
                    "categoryname": "开标结果公示"
                },
                {
                    "categorynum": "002001004",
                    "categoryname": "中标候选人公示"
                },
                {
                    "categorynum": "002001005",
                    "categoryname": "中标结果公告"
                },
            ],
            # "政府采购": [
            #     {
            #         "categorynum": "002002001",
            #         "categoryname": "采购公告"
            #     },
            #     {
            #         "categorynum": "002002002",
            #         "categoryname": "中标成交公告"
            #     },
            # ],
            # "土地矿业": [
            #     {
            #         "categorynum": "001003001",
            #         "categoryname": "出让公告"
            #     },
            #     {
            #         "categorynum": "001003002",
            #         "categoryname": "出让土地实时报价"
            #     },
            #     {
            #         "categorynum": "001003003",
            #         "categoryname": "成交结果公示"
            #     }
            # ],
            # "国有产权": [
            #     {
            #         "categorynum": "002004001",
            #         "categoryname": "挂牌公告"
            #     },
            #     {
            #         "categorynum": "002004002",
            #         "categoryname": "成交公告"
            #     },
            # ],
            #
            # "其他交易": [
            #     {
            #         "categorynum": "002007001",
            #         "categoryname": "交易公告"
            #     },
            #     {
            #         "categorynum": "002007002",
            #         "categoryname": "交易结果"
            #     },
            # ],
        }
        return city, info_type

    def start_requests(self):
        """Bootstrap the crawl.

        First fetch the homepage with ``requests`` solely to harvest the
        load-balancer ``SERVERID`` cookie from the ``Set-Cookie`` header,
        then issue one search-API POST per (city, category) combination.
        """
        citys, info_types = self.classify()
        response = requests.get(url="http://www.zjpubservice.com/",
                                headers={
                                    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
                                              ",application/signed-exchange;v=b3;q=0.9",
                                    "Accept-Encoding": "gzip, deflate",
                                    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
                                    # "Host": "www.zjpubservice.com",
                                    "Proxy-Connection": "keep-alive",
                                    "Upgrade-Insecure-Requests": "1",
                                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36 Edg/86.0.622.58",
                                })
        # Set-Cookie looks like "...HttpOnly, SERVERID=...;Path=...": extract
        # the "SERVERID=..." fragment so the API requests stick to the same
        # backend server.
        cookie_serverid = re.findall(r'HttpOnly, (.*?);Path', response.headers['Set-Cookie'])[0]
        for city_objs in citys:
            city_name = city_objs['name']
            city_code = city_objs['code']
            for info_type, values_objs in info_types.items():
                for value_obj in values_objs:
                    categorynum = value_obj['categorynum']
                    categoryname = value_obj['categoryname']
                    # The API expects a raw JSON body, hence FormRequest with
                    # ``body=`` rather than ``formdata=``.
                    body = json.dumps(self.get_payloaddata(0, categorynum, city_code))
                    yield scrapy.FormRequest(
                        url="http://www.zjpubservice.com/inteligentsearch/rest/inteligentSearch/getFullTextData",
                        headers=self.get_headers(cookie_serverid),
                        callback=self.handle_response,
                        body=body,
                        meta={
                            'url': "http://www.zjpubservice.com/inteligentsearch/rest/inteligentSearch/getFullTextData",
                            'city': city_name,
                            'need_break': False,
                            'construction_type': info_type,
                            "project_type": categoryname,
                            'page_pn': 0,  # record offset of this page
                            'cookie_serverid': cookie_serverid,
                            'page_count': -1,  # -1 = total hit count unknown yet
                            'categorynum': categorynum,
                            'city_code': city_code,
                        },
                        dont_filter=True
                    )

    def handle_response(self, response):
        """Consume one page of search-API results.

        Emits a detail-page request per record, and — unless the incremental
        cut-off was reached — requests the next page with the offset advanced
        by ``self.capacity``.
        """
        try:
            json_obj = response.json()['result']
        except Exception:
            # Unparseable or unexpected payload: stop this crawl branch.
            # (The original bare ``except`` only printed and then fell
            # through, crashing with NameError on the unbound ``json_obj``.)
            print("1" * 30)
            return

        if response.meta['page_count'] == -1:
            # First page for this (city, category): remember the total count.
            response.meta['page_count'] = int(json_obj['totalcount'])
        if response.meta['page_count']:
            for row in json_obj['records']:
                item = dict()
                item['release_time'] = row['webdate'][0:10]
                # Incremental mode keeps only today's records; results appear
                # to be ordered by webdate (see ``sort`` in get_payloaddata
                # — confirm direction), so an older record ends pagination.
                if not self.full_dose and item['release_time'] != get_current_date():
                    response.meta['need_break'] = True
                else:
                    item['announcement_title'] = row['title']
                    item['construction_type'] = response.meta['construction_type']
                    item['project_type'] = response.meta['project_type']
                    item['origin_url'] = 'http://www.zjpubservice.com' + row['linkurl']
                    item['project_area'] = row['infod']
                    item['city'] = response.meta['city']
                    item['item'] = ZheJiangShengGongGongZiYuanJiaoYiPingTaiItem()

                    yield scrapy.Request(url=item['origin_url'],
                                         callback=self.handle_detail_page,
                                         meta=item,
                                         dont_filter=True,
                                         )
        else:
            # Zero hits for this (city, category): nothing to page through.
            response.meta['need_break'] = True

        if not response.meta['need_break']:
            page_pn = response.meta['page_pn']
            page_count = response.meta['page_count']
            # NOTE: this may request one page past the end; the resulting
            # empty "records" list makes the loop above a no-op and the
            # offset check below eventually terminates pagination.
            if page_pn <= page_count:
                page_pn += self.capacity
                construction_type = response.meta['construction_type']
                cookie_serverid = response.meta['cookie_serverid']
                categorynum = response.meta['categorynum']
                city_code = response.meta['city_code']
                body = json.dumps(self.get_payloaddata(page_pn, categorynum, city_code))
                yield scrapy.FormRequest(url=response.url,
                                         headers=self.get_headers(cookie_serverid),
                                         callback=self.handle_response,
                                         body=body,
                                         meta={
                                             'need_break': False,
                                             'construction_type': construction_type,
                                             "project_type": response.meta['project_type'],
                                             'page_pn': page_pn,
                                             'cookie_serverid': cookie_serverid,
                                             'page_count': page_count,
                                             'categorynum': categorynum,
                                             'city': response.meta['city'],
                                             'city_code': city_code,
                                         },
                                         dont_filter=True,
                                         )

    def get_headers(self, SERVERID):
        """Build request headers for the search API.

        :param SERVERID: the "SERVERID=..." cookie fragment harvested in
            ``start_requests``; appended to the (otherwise static) Cookie
            header so requests reach the same backend.
        """
        headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
            "Content-Type": "application/json;charset=UTF-8",
            "Cookie": f"HttpOnly; JSESSIONID=A2705C89D6356F3EC1B904A3FC5CAFA6; HttpOnly; oauthClientId=demoClient; "
                      f"oauthPath=http://223.4.69.84:8080/EpointWebBuilder; oauthLoginUrl=http://127.0.0.1/"
                      f"membercenter/login.html?redirect_uri=; oauthLogoutUrl=; noOauthRefreshToken="
                      f"1dc347b4d59250e0d344fa1a896e6808; noOauthAccessToken=bf3606f81125ad67d3fa1132ffc360dd; {SERVERID}",
            # "Host": "www.zjpubservice.com",
            # "Origin": "http://www.zjpubservice.com",
            "Proxy-Connection": "keep-alive",
            # "Referer": "http://www.zjpubservice.com/jyxxgk/list.html",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
                          " Chrome/86.0.4240.111 Safari/537.36 Edg/86.0.622.58",
            "X-Requested-With": "XMLHttpRequest",
        }
        return headers

    def get_payloaddata(self, start_num, categorynum, city_code):
        """Build the JSON payload for the full-text search API.

        :param start_num:   record offset of the requested page ("pn").
        :param categorynum: announcement category code, e.g. "002001001".
            (Was misleadingly named ``categoryname``; every call site passes
            the "categorynum" value.)
        :param city_code:   city filter code, e.g. "3301" (matched against
            the "infoc" field).
        """
        return {
            "token": "",
            "pn": start_num,
            "rn": self.capacity,  # page size
            "sdt": "",
            "edt": "",
            "wd": "null",
            "inc_wd": "",
            "exc_wd": "",
            "fields": "title",
            "cnum": "001",
            "sort": "{\"webdate\": \"0\"}",
            "ssort": "title",
            "cl": 200,
            "terminal": "",
            "condition": [{
                'fieldName': "categorynum",
                'isLike': 'true',
                'likeType': 2,
                'equal': f"{categorynum}",
            }, {
                'fieldName': "infoc",
                'isLike': 'true',
                'likeType': 2,
                'equal': f"{city_code}",
            }],
            "time": [{
                'fieldName': "webdate",
                'startTime': "2000-11-11 00:00:00",
                'endTime': f"{get_current_date()} 23:59:59"
            }],
            "highlights": "",
            "statistics": "null",
            "unionCondition": "null",
            "accuracy": "",
            "noParticiple": "0",
            "searchRange": "null",
            "isBusiness": "1"
        }

    def handle_detail_page(self, response):
        """Dispatch a detail page to the right follow-up request.

        Pages containing "摘要信息" (summary info) are stubs that link out to
        the real content, which lives on several different hosts; each host
        needs its own request shape. Otherwise the page is either a PDF
        wrapper or plain HTML.
        """
        if "摘要信息" in response.text:
            # Known stub variants (kept from the original notes):
            # 1) listed-trading host:
            #    http://cs3.bidding.gov.cn:6062/platform//project/notice/notice.jsp?id=13ac907ff22df39d37665303b1a0060a
            # 2) neither PDF nor content:
            #    https://zfcg.czt.zj.gov.cn/innerUsed_noticeDetails/index.html?noticeId=7369608
            # 3) redirects to another site's homepage (still stored in mongo), e.g.
            #    http://www.zjpubservice.com/jyxxgk/002007/002007002/20201118/bc402731-afe8-4485-9f49-6aa93e4fd0bb.html
            #    -> http://220.191.214.173
            # 4) bid-opening participant pages
            if "挂牌交易" not in response.meta['construction_type']:
                response.meta['origin_url'] = re.findall(r'<a\s+?href="(htt.*?\?{0,1}.*?)">', response.text)[0]
                if "noticeId" in response.meta['origin_url']:
                    # Government-procurement stub.  Example chain:
                    # list link (GET):
                    #   http://www.zjpubservice.com/jyxxgk/002002/002002002/20201118/c28f0deb-4418-4ccd-abdc-1844c3e2aa4f.html
                    # extracted url (GET):
                    #   https://zfcg.czt.zj.gov.cn/innerUsed_noticeDetails/index.html?noticeId=7369599
                    # url with the real content (GET, returns JSON handled by parse_item):
                    #   https://zfcgmanager.czt.zj.gov.cn/cms/api/cors/remote/results?noticeId=7369599&url=noticeDetail
                    noticeId = re.findall(r"noticeId=(\d+)", response.meta['origin_url'])[0]
                    response.meta[
                        'origin_url'] = f"https://zfcgmanager.czt.zj.gov.cn/cms/api/cors/remote/results?noticeId={noticeId}&url=noticeDetail"
                    yield scrapy.Request(
                        url=response.meta['origin_url'],
                        callback=self.parse_item,
                        headers={
                            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
                                          " Chrome/86.0.4240.111 Safari/537.36 Edg/86.0.622.58",
                        },
                        meta=response.meta,
                        dont_filter=True,
                    )
                elif "id=" in response.meta['origin_url'] \
                    and "&" not in response.meta['origin_url']\
                    and "infoid=" not in response.meta['origin_url']:
                    # "id=" stub WITHOUT extra query params.  Counter-examples
                    # excluded by the "&" / "infoid=" guards:
                    #   http://www.hzctc.cn/OpenBidRecord/Index?id=...&tenderID=...&ModuleID=486
                    #   http://122.228.89.242/TPFrontNew/infodetail/?infoid=cc5893aa-...
                    # Example chain for this branch:
                    # list link (GET):
                    #   http://www.zjpubservice.com/jyxxgk/002007/002007002/20201118/edc99ffd-00ab-4fdc-a296-e2b164ce2bb9.html
                    # extracted url (GET):
                    #   http://cs3.bidding.gov.cn:6062/platform//project/notice/notice.jsp?id=03791bfc4968af7b3edffb1fb8d70e8d
                    # url with the real content (POST, param "id"):
                    #   http://cs3.bidding.gov.cn:6062/platform/noticeController.do?getNotice
                    id = re.findall(r"id=(.*)", response.meta['origin_url'])[0]
                    yield scrapy.FormRequest(
                        url="http://cs3.bidding.gov.cn:6062/platform/noticeController.do?getNotice",
                        callback=self.parse_item,
                        headers={
                            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
                                          " Chrome/86.0.4240.111 Safari/537.36 Edg/86.0.622.58",
                        },
                        formdata={
                            "id": f"{id}",
                        },
                        meta=response.meta,
                        dont_filter=True,
                    )
                elif "id=" in response.meta['origin_url'] and "&tenderID" in response.meta['origin_url']:
                    # Hangzhou bid-opening record: fetched via a POST refresh
                    # endpoint; keep the stub HTML for later use.
                    response.meta['p_html'] = response.text
                    # NOTE(review): the pattern requires a "&" after the
                    # tenderID value (e.g. "&ModuleID=..."); a URL ending at
                    # tenderID would raise IndexError — confirm acceptable.
                    openID, tenderID = re.findall(r"id=(.*?)&tenderID=(.*)&",response.meta['origin_url'])[0]
                    yield scrapy.FormRequest(
                        url="https://www.hzctc.cn/OpenBidRecord/main_reflesh",
                        headers={
                            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
                                          " Chrome/86.0.4240.111 Safari/537.36 Edg/86.0.622.58",
                        },
                        formdata={
                            "openID":openID,
                            "tenderID":tenderID
                        },
                        callback=self.parse_item,
                        dont_filter=True,
                        meta=response.meta
                    )
                else:
                    # Any other external link: fetch it directly.
                    yield scrapy.Request(
                        url=response.meta['origin_url'],
                        callback=self.parse_item,
                        headers={
                            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
                                          " Chrome/86.0.4240.111 Safari/537.36 Edg/86.0.622.58",
                        },
                        meta=response.meta,
                        dont_filter=True,
                    )
        elif ".pdf" in response.text:
            yield from self.handle_pdf_content(response)
        else:
            yield from self.parse_item(response)

    def handle_pdf_content(self, response):
        """Resolve a detail page whose content is a linked PDF.

        Marks the meta with ``is_pdf`` and the resolved ``pdf_url`` when a
        PDF link is found; pages that merely mention ".pdf" as an attachment
        ("附件：") go straight to ``parse_item``.
        """
        if ".pdf" in response.text and "附件：" not in response.text:
            # Keep the wrapper HTML alongside the PDF flag.
            response.meta['html'] = response.text
            response.meta['is_pdf'] = 1
            src = re.findall(r'src="(.*?\.pdf)"', response.text)
            href = re.findall(r'href="(/zhejiang.*?\.pdf)"', response.text)
            if href:
                # Site-relative link: prefix the platform host.
                response.meta['pdf_url'] = "http://www.zjpubservice.com" + href[0]
            elif src:
                response.meta['pdf_url'] = src[0]
            else:
                # No usable PDF link after all: undo the flag.
                del response.meta['is_pdf']
            if "pdf_url" in response.meta:
                url = response.meta['pdf_url']
            else:
                url = response.meta['origin_url']
            yield scrapy.Request(
                url=url,
                headers={
                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
                                  " Chrome/86.0.4240.111 Safari/537.36 Edg/86.0.622.58",
                },
                callback=self.parse_item,
                meta=response.meta,
            )
        else:
            yield from self.parse_item(response)
