import scrapy
from commonresources.inner_utils.standardize_field_utils import check_city_field
from commonresources.spiders.basespider import BaseSpider
from commonresources.spider_items.guangdong.items import GuangDongShengZhaoBiaoTouBiaoJianGuanWangItem
from scrapy.http import Request
import requests
from spidertools.utils.time_utils import get_current_date
import re
# from commonresources.project_html_pipline import ProjectHtmlPipeline

class GuangDongShengZhaoBiaoTouBiaoJianGuanWangSpider(BaseSpider):
    """
    广东省招标投标监管网 (Guangdong Bidding & Tendering Supervision Network)
    http://zbtb.gd.gov.cn/

    Crawl flow:
        start_requests   -> one POST per announcement category
        handle_response  -> parses one listing-JSON page, fetches each detail
                            page, requests the attachment list for each row,
                            then schedules the next listing page
        handle_response2 -> turns attachment ids into download URLs and hands
                            them to ``parse_item_new`` (defined in BaseSpider)
    """
    name = 'GuangDongShengZhaoBiaoTouBiaoJianGuanWang2'
    name_zh = "广东省招标投标监管网"
    province = "广东"

    def __init__(self, full_dose=False):
        """
        :param full_dose: forwarded to BaseSpider; presumably toggles a full
            (historic) crawl vs. an incremental one — TODO confirm in BaseSpider.
        """
        super(GuangDongShengZhaoBiaoTouBiaoJianGuanWangSpider, self).__init__(full_dose)
        # category name -> [listing endpoint, "type" value for the POST body]
        self.urls = {
                    "资格预审公告": ["http://zbtb.gd.gov.cn/bid/listZgysgg", 'zgysgg'],
                    "招标公告": ["http://zbtb.gd.gov.cn/bid/listZbgg", 'zbgg'],
                    "投标文件": ["http://zbtb.gd.gov.cn/bid/listTbwj", 'tbwj'],
                    "评标报告": ["http://zbtb.gd.gov.cn/bid/listPbbg", 'pbbg'],
                    "中标结果": ["http://zbtb.gd.gov.cn/bid/listZbjg", 'zbjg'],
                     }

    def parse(self, response):
        """Unused: all parsing is driven by explicit callbacks set in
        ``start_requests``; kept only to satisfy the Spider interface."""
        pass

    @staticmethod
    def _list_formdata(page, start, f_type):
        """Build the DataTables-style POST payload for one listing page.

        :param page: 1-based page number (also reused as the ``draw`` counter)
        :param start: 0-based offset of the first record on the page
        :param f_type: category code, e.g. ``'zbgg'``
        :return: dict of str->str form fields expected by the listing endpoint
        """
        return {"draw": f"{page}",
                "columns[0][data]": "id",
                "columns[0][name]": "",
                "columns[0][searchable]": "true",
                "columns[0][orderable]": "false",
                "columns[0][search][value]": "",
                "columns[0][search][regex]": "false",
                "start": f"{start}",
                "length": "20",
                "search[value]": "",
                "search[regex]": "false",
                "page": f"{page}",
                "type": f"{f_type}",
                "xmmc": "",
                "rows": "20"}

    def start_requests(self):
        """Issue the first listing request (page 1) for every category."""
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Cookie': ' __jsluid_h=49fcfbed5a56475979929d31eb04f33a; _gscu_1834442730=09298511j2yqtu13; Hm_lvt_d7682ab43891c68a00de46e9ce5b76aa=1609896334,1610001786,1610001998,1610010590; tabmode=1; _gscbrs_1834442730=1; tl.session.id=96c058ef2c284db8a3122235bc3b07ef; JSESSIONID=7DA7FA6B8024E415092EEBED40BF704F; _gscs_1834442730=t10088481poci2p45|pv:2',
            'Host': 'zbtb.gd.gov.cn',
            'Referer': 'http://zbtb.gd.gov.cn/login',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36 Edg/87.0.664.66',
        }
        for announcement_type, (list_url, f_type) in self.urls.items():
            yield scrapy.FormRequest(url=list_url,
                                     callback=self.handle_response,
                                     dont_filter=True,
                                     headers=headers,
                                     formdata=self._list_formdata(1, 0, f_type),
                                     meta={"announcement_type": announcement_type,
                                           "page": 1,
                                           "type": f_type,
                                           "url": list_url,
                                           "start_page": 0})

    def handle_response(self, response):
        """Parse one page of the listing JSON.

        For each row: fetch the detail page html, then POST for its
        attachment list (handled by :meth:`handle_response2`). Finally
        schedule the next listing page if one remains.
        """
        announcement_type = response.meta["announcement_type"]
        f_type = response.meta["type"]
        url = response.meta["url"]
        payload = response.json()
        for row in payload["data"]:
            item = {}
            item["release_time"] = row["publishdate"]               # 发布时间
            item["project_city"] = check_city_field(row["szdq"])    # normalized city
            item["announcement_title"] = row["bidSectionName"]      # title
            announcement_id = row["id"]
            item['announcement_type'] = row["datatype"]             # category code, e.g. 'zbgg'
            item['is_pdf'] = 1
            origin_url = ("http://zbtb.gd.gov.cn/bid/detail"
                          + row["datatype"].title() + "?id=" + announcement_id)
            self.logger.debug("detail url: %s", origin_url)
            item["origin_url"] = origin_url

            # BUG FIX: the original assigned item['item']["html"] BEFORE
            # item["item"] existed, raising KeyError on every row.
            # Create the Item first, then store the detail-page html.
            item["item"] = GuangDongShengZhaoBiaoTouBiaoJianGuanWangItem()
            # NOTE(review): blocking requests.get inside a Scrapy callback —
            # kept for compatibility with the original design.
            res = requests.get(origin_url, headers=self.fake_header())
            res.encoding = "utf-8"
            item["item"]["html"] = res.text  # detail page source

            # 'tbwj' (bid documents) uses a dedicated file-list endpoint;
            # everything else goes through the generic attachment list.
            if row["datatype"] == 'tbwj':
                target_url = "http://zbtb.gd.gov.cn/content/contentTbwj/fileList"
                form = {"id": f"{announcement_id}"}
            else:
                target_url = "http://zbtb.gd.gov.cn/platform/attach/getAttachList"
                form = {"parentId": f"{announcement_id}",
                        "parentType": "GGNR",
                        "rows": "999999",
                        "page": "1"}
            # Resolve the pdf download ids for this announcement.
            yield scrapy.FormRequest(url=target_url,
                                     callback=self.handle_response2,
                                     dont_filter=True,
                                     formdata=form,
                                     meta=item)

        # Ceiling division; the original `// 20 + 1` requested one
        # guaranteed-empty extra page whenever recordsTotal was an
        # exact multiple of the 20-row page size.
        total_page = -(-int(payload["recordsTotal"]) // 20)
        page = response.meta["page"] + 1
        start_page = response.meta["start_page"]
        if page <= total_page:
            start_page += 20
            yield scrapy.FormRequest(url=url,
                                     callback=self.handle_response,
                                     headers=self.fake_header(),
                                     dont_filter=True,  # skip dupe filter
                                     formdata=self._list_formdata(page, start_page, f_type),
                                     meta={"page": page,
                                           "announcement_type": announcement_type,
                                           "url": url,
                                           "type": f_type,
                                           "start_page": start_page})

    def handle_response2(self, response):
        """Turn attachment-list JSON into download requests.

        Attachments of the categories in ``always_download`` are fetched
        unconditionally; for the remaining categories ('tbwj', 'pbbg') only
        attachments whose file name ends in 'pdf' are fetched.
        """
        datatype = response.meta["announcement_type"]  # category code set in handle_response
        always_download = ('zgysgg', 'zbgg', 'zbjg')
        for attach in response.json()["data"]:
            # archive / pdf download address for this attachment id
            download_url = "http://zbtb.gd.gov.cn/platform/attach/download?id=" + attach["id"]
            if datatype in always_download:
                yield scrapy.Request(url=download_url, callback=self.parse_item_new,
                                     dont_filter=True, meta=response.meta)
            # BUG FIX: the original re-scanned the WHOLE attachment list for
            # every attachment, yielding the same URL once per pdf present
            # (and even when the current attachment was not a pdf). Test only
            # the current attachment's file name.
            elif attach["factName"][-3:] == 'pdf':
                yield scrapy.Request(url=download_url, callback=self.parse_item_new,
                                     dont_filter=True, meta=response.meta)

    def fake_header(self, flag=0):
        """Return a browser-like header set for zbtb.gd.gov.cn.

        :param flag: truthy selects the extended Accept-Language variant.
        :return: dict of HTTP headers.
        """
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "max-age=0",
            "Connection": "keep-alive",
            "Cookie": "__jsluid_h=e54c48f7570d806bad3056247c9141e5; _gscu_1834442730=09730975xey3ee17; _gscbrs_1834442730=1; JSESSIONID=DCAB64351B5A76746CF93FA088468BD9; tl.session.id=26bad49106a14fcfa80b014b70738910",
            "Host": "zbtb.gd.gov.cn",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
        }
        if flag:
            headers['Accept-Language'] = "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6"
        return headers