# -*- coding: UTF-8 -*-
"""
@author:zhangxing
@file:shandongshenggonggongziyuanjiaoyiwang.py
@time:2020/11/30
"""
import json
import os
import re
import subprocess
import sys
import time
import random

from selenium import webdriver
from spidertools.commone_define import chromedriver_linux_path, chromedriver_win_path
import requests
import scrapy
from spidertools.utils.time_utils import get_current_date

from commonresources.spider_items.base_item import convert_dict
from commonresources.spider_items.shandong.items import ShanDongShengGongGongZiYuanJiaoYiWangItem
from commonresources.spiders.basespider import BaseSpider
from commonresources.third_middlewares import IpDaily10MinAgent, user_agent_list


class ShanDongShengGongGongZiYuanJiaoYiWangSpider(BaseSpider):
    """Spider for the Shandong Provincial Public Resource Trading Network.

    Home page:          http://ggzyjy.shandong.gov.cn/
    Trading-info page:  http://ggzyjy.shandong.gov.cn/queryContent-jyxxgk.jspx

    Detail-page URLs on this site are obfuscated client-side (the numeric
    article id is AES-encrypted by the page's own JavaScript), so a headless
    Chrome driver is kept alive and used to run that JS and recover real URLs.
    """
    name = "ShanDongShengGongGongZiYuanJiaoYiWang"
    name_zh = "山东省公共资源交易网"
    province = "山东"

    def __init__(self, full_dose=False):
        """
        :param full_dose: when truthy, crawl the historical archive instead of
            only items released today (see handle_response).
        """
        super(ShanDongShengGongGongZiYuanJiaoYiWangSpider, self).__init__(full_dose)
        self.convert_dict = convert_dict
        # Selenium driver; created lazily by init_chrome_driver() from start_requests().
        self.driver = None
        # Announcement-type ids as they appear in the page markup
        # (could also be obtained by parsing the page dynamically).
        self.type_dict = {'85': "招标/资审公告", '89': "开标记录", "149": "中标候选人公示", "87": "交易结果公告", "88": "招标/招标文件澄清", "86": "资格预审结果"}

    def parse(self, response):
        """Unused: scheduling starts from start_requests() with explicit callbacks."""
        pass

    def init_chrome_driver(self):
        """Start a headless Chrome and load the listing page.

        The driver is stored on ``self.driver`` and reused by
        click_for_detail_page() to execute the site's URL-encryption JS,
        which needs the page's own context (CryptoJS and the key ``s``).
        """
        options = webdriver.ChromeOptions()
        driver_dir = os.path.dirname(os.path.abspath(__file__))
        if sys.platform.startswith('win'):
            chromedriver_path = os.path.join(driver_dir, "../../inner_utils/driver/chromedriver.exe")
        else:
            chromedriver_path = os.path.join(driver_dir, "../../inner_utils/driver/chromedriver")
        # Disable image loading and hide the automation banner: faster and quieter.
        options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2})
        options.add_experimental_option('excludeSwitches', ['enable-automation'])
        options.add_argument('--headless')
        random_ua = random.choice(user_agent_list)
        options.add_argument(f"User-Agent='{random_ua}'")
        driver = webdriver.Chrome(options=options, executable_path=chromedriver_path)
        driver.implicitly_wait(2)
        # Load the listing page once so its JS environment is available for execute_script().
        driver.get('http://ggzyjy.shandong.gov.cn/queryContent-jyxxgk.jspx?channelId=78')
        self.driver = driver

    def init_chrome_driver1(self):
        """Alternative driver bootstrap using the chromedriver paths declared in
        spidertools.commone_define instead of the in-repo driver binaries.
        Kept for environments where the packaged driver is unavailable.
        """
        if sys.platform.startswith('win'):
            chromedirver_path = chromedriver_win_path
        else:
            chromedirver_path = chromedriver_linux_path
        options = webdriver.ChromeOptions()
        options.add_experimental_option("prefs", {"profile.managed_default_content_settings.images": 2})
        options.add_experimental_option('excludeSwitches', ['enable-automation'])
        options.add_argument('--headless')
        options.add_argument('--disable-gpu')
        random_ua = random.choice(user_agent_list)
        options.add_argument(f"User-Agent='{random_ua}'")
        driver = webdriver.Chrome(options=options, executable_path=chromedirver_path)
        driver.get('http://ggzyjy.shandong.gov.cn/queryContent-jyxxgk.jspx?channelId=78')
        self.driver = driver

    def start_requests(self):
        """Boot the Selenium driver, then POST the listing query for each channel."""
        self.init_chrome_driver()
        url = "http://ggzyjy.shandong.gov.cn/queryContent-jyxxgk.jspx"
        start_configs = [{
            "channelId": 78,
            "announcement_type": "工程建设"},  # {"channelId": 162,"announcement_type": "其他交易"}
        ]
        for obj in start_configs:
            channelid = obj['channelId']
            announcement_type = obj['announcement_type']
            yield scrapy.FormRequest(
                url=url,
                headers=self.faker_headers(),
                formdata=self.faker_formdata(channelid),
                dont_filter=True,
                callback=self.handle_response,
                meta={
                    "channelid": channelid,
                    "announcement_type": announcement_type,
                    "need_break": False,       # set True once we crawl past the wanted date range
                    "page_count": -1,          # -1 = total page count not yet parsed
                    "page": 1
                },
            )

    def handle_response(self, response):
        """Parse one listing page: emit a detail request per row, then paginate."""
        if response.meta['page_count'] == -1:
            # The total record count appears once in the page text; 10 rows per page.
            response.meta['page_count'] = int(re.findall(r"共(\d+)条记录", response.text)[0]) // 10 + 1
        objs = response.xpath('//div/ul[@class="article-list-a"]/li')
        for obj in objs:
            item = dict()
            item['release_time'] = obj.xpath('./div[@class="article-list3-t"]/div/text()').extract_first()[0:10]
            if not self.full_dose and item['release_time'] != get_current_date():
                # Incremental mode: stop paginating once we pass today's items.
                response.meta['need_break'] = True
            elif item['release_time'] < "2015-12-31":
                # Full mode: records older than end-2015 are out of scope.
                response.meta['need_break'] = True
            else:
                item['announcement_title'] = (
                    "".join(obj.xpath('./div[@class="article-list3-t"]/a/text()').extract())).strip()
                try:
                    # City is embedded as 【city】 inside the title label; missing on some rows.
                    item['project_city'] = obj.xpath('./div[@class="article-list3-t"]/a/label/text()').extract_first().split('【')[-1].split('】')[0]
                except AttributeError:  # extract_first() returned None — no label on this row
                    pass
                origin_url = obj.xpath('./div[@class="article-list3-t"]/a/@href').extract_first()
                item['source_platform_original'] = \
                    obj.xpath('./div[@class="article-list3-t2"]/div[1]/text()').extract_first().split("：")[-1]
                item['project_type'] = \
                    obj.xpath('./div[@class="article-list3-t2"]/div[2]/text()').extract_first().split("：")[-1]
                item['announcement_type'] = \
                    obj.xpath('./div[@class="article-list3-t2"]/div[3]/text()').extract_first().split("：")[-1]
                item['origin_url'] = self.click_for_detail_page(origin_url).replace(":80", '')
                item['item'] = ShanDongShengGongGongZiYuanJiaoYiWangItem()
                # NOTE: merge the extra headers into a new dict — dict.update()
                # returns None, so `faker_headers().update({...})` would silently
                # send the request with no headers at all.
                yield scrapy.Request(url=item['origin_url'],
                                     callback=self.handle,
                                     dont_filter=True,
                                     headers={**self.faker_headers(), **{
                                         "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
                                         "Accept-Encoding": "gzip, deflate",
                                         "Accept-Language": "zh-CN,zh;q=0.9",
                                         "Cache-Control": "max-age=0",
                                         "Connection": "keep-alive",
                                         "Host": "ggzyjy.shandong.gov.cn",
                                         "Upgrade-Insecure-Requests": "1",
                                     }},
                                     meta=item)
        if not response.meta['need_break']:
            page_count = response.meta['page_count']
            page = response.meta['page']
            if page < page_count:
                page += 1
                url = f"http://ggzyjy.shandong.gov.cn/queryContent_{page}-jyxxgk.jspx?channelId=78"
                channelid = response.meta['channelid']
                announcement_type = response.meta['announcement_type']
                yield scrapy.FormRequest(
                    url=url,
                    headers=self.faker_headers(),
                    formdata=self.faker_formdata(channelid),
                    dont_filter=True,
                    callback=self.handle_response,
                    meta={
                        "channelid": channelid,
                        "announcement_type": announcement_type,
                        "need_break": False,
                        "page_count": page_count,
                        "page": page
                    },
                )

    def click_for_detail_page(self, num_str):
        """Resolve a listing href into the real (obfuscated) detail-page URL.

        ``num_str`` ends in ``<article-id>.jhtml``; the site encrypts that id
        (CryptoJS AES/ECB, key ``s`` defined by the page) before linking to it.
        We run the site's own JS inside the already-loaded Selenium page.

        :param num_str: relative href taken from the listing page.
        :return: absolute detail URL with the encrypted id.
        """
        num = num_str.split('/')[-1].split('.')[0]
        # Raw strings: the embedded JS regexes contain backslashes (\+ and \/)
        # that must reach the browser verbatim without Python escape warnings.
        js = r"""
            var item = "http://ggzyjy.shandong.gov.cn:80/jsgczbgg/""" + f"{num}" + r""".jhtml";
            return myFunction(item);
            function myFunction(hh) {
                var aa = hh.split("/");
                var aaa = aa.length;
                var bbb = aa[aaa - 1].split('.');
                var ccc = bbb[0];
                var cccc = bbb[1];
                var r = /^\+?[1-9][0-9]*$/;
                if (r.test(ccc) && cccc.indexOf('jhtml') != -1) {
                    var srcs = CryptoJS.enc.Utf8.parse(ccc);
                    var k = CryptoJS.enc.Utf8.parse(s);
                    var en = CryptoJS.AES.encrypt(srcs, k, {
                        mode: CryptoJS.mode.ECB,
                        padding: CryptoJS.pad.Pkcs7
                    });
                    var ddd = en.toString();
                    ddd = ddd.replace(/\//g, "^");
                    ddd = ddd.substring(0, ddd.length - 2);
                    var bbbb = ddd + '.' + bbb[1];
                    aa[aaa - 1] = bbbb;
                    var uuu = '';
                    for (i = 0; i < aaa; i++) {
                        uuu += aa[i] + '/'
                    }
                    uuu = uuu.substring(0, uuu.length - 1);
                    return uuu
                } else {
                    var ee = $(this).attr('target');
                    if (ee.typeof('undefined')) {
                        window.location = hh
                    } else {
                        window.open(hh)
                    }
                }
                return false
            }
            """
        detail_url = self.driver.execute_script(js)
        print(f"reslut 【{num_str}】>>>【 {detail_url} 】")
        return detail_url

    def handle(self, response):
        """Parse a detail page; the first gycq-list block is the page's own
        content, subsequent blocks are related announcements to follow."""
        if response.status != 200:
            return
        objs = response.xpath('//ul[@class="gycq-list"]')
        for index, obj in enumerate(objs):
            if index == 0:
                # First block is this page's own announcement.
                yield from self.parse_item_new(response)
                continue
            try:
                item = dict()
                num_str_url = obj.xpath('./li/a/@href').extract_first()
                item['announcement_title'] = obj.xpath('./li/a/text()').extract_first()
                type_id = obj.xpath('./@id').extract_first()
                item['announcement_type'] = self.type_dict[type_id]
                item['release_time'] = obj.xpath('./li/div[@class="gycq-times"]/text()').extract_first()[0:10]
            except (TypeError, KeyError, AttributeError):
                # Missing node: extract_first() gave None (TypeError on slicing)
                # or an unknown type id (KeyError) — skip this related entry.
                continue

            item['source_platform_original'] = response.meta['source_platform_original']
            item['project_city'] = response.meta['project_city']
            item['origin_url'] = self.click_for_detail_page(num_str_url).replace(":80", '')
            item['item'] = response.meta['item']
            yield scrapy.Request(url=item['origin_url'],
                                 callback=self.parse_item_new,
                                 headers={**self.faker_headers(), **{
                                     "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
                                     "Accept-Encoding": "gzip, deflate",
                                     "Accept-Language": "zh-CN,zh;q=0.9",
                                     "Cache-Control": "max-age=0",
                                     "Connection": "keep-alive",
                                     "Host": "ggzyjy.shandong.gov.cn",
                                     "Upgrade-Insecure-Requests": "1",
                                 }},
                                 dont_filter=True,
                                 meta=item)

    def faker_headers(self):
        """Base request headers; extend with extra fields at the call site."""
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
                          " Chrome/86.0.4240.111 Safari/537.36 Edg/86.0.622.58",
            "Upgrade-Insecure-Requests": "1"
        }
        return headers

    def faker_formdata(self, channelId):
        """Form payload for the listing query; only channelId varies."""
        return {
            "title": "",
            "origin": "",
            "inDates": "",
            "channelId": f"{channelId}",
            "ext": "",
        }
