# -*- coding: utf-8 -*-
"""
Created on 2021-10-13 00:50:17
---------
@summary: 分布式爬取公共管理学院
---------
@author: 大龙
"""

import subprocess
import sys
import time
from random import randint

import feapder
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

from config import MySpiderConfig
from items.scunews_item import SCUNewsDataItem

sys.path.append('..')


def close_tab(browser):
    """Close the browser's active tab by sending the Ctrl+W keyboard shortcut."""
    chain = ActionChains(browser)
    chain.key_down(Keys.CONTROL)
    chain.send_keys("w")
    chain.key_up(Keys.CONTROL)
    chain.perform()


def run_cmd(cmd):
    """Launch *cmd* as a detached background process and return its handle.

    Used to start Chrome with the remote-debugging port open; the spider
    does not wait for the command to finish.  Returning the ``Popen``
    object (the original discarded it) lets callers wait on, poll, or
    terminate the process if they need to.

    @param cmd: the command to run — prefer an argument list so the
                command executes without a shell (``shell=False``),
                which is both safer and portable.
    @result: the ``subprocess.Popen`` handle of the started process
    """
    return subprocess.Popen(cmd)


class CsSpider(feapder.BatchSpider):
    """Distributed batch spider for the School of Public Administration news site.

    Drives a real Chrome instance (attached through the remote-debugging
    port, with the ``navigator.webdriver`` fingerprint hidden) so pages
    render exactly as a user would see them, and yields each article as a
    :class:`SCUNewsDataItem`.
    """

    # Detail-page XPaths shared by parse() and parse_news_content(),
    # kept in one place so the two paths cannot drift apart.
    TITLE_XPATH = "/html/body/div/div[4]/div[2]/form/div/h2"
    DATE_XPATH = "/html/body/div/div[4]/div[2]/form/div/div[1]/span[1]"
    CONTENT_XPATH = "//div[@class='v_news_content']"
    LIST_XPATH = "/html/body/div/div[4]/div[2]/div[2]/ul"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.config = MySpiderConfig()
        option = webdriver.ChromeOptions()
        # Attach to an already-running Chrome instead of spawning a fresh one.
        option.add_experimental_option("debuggerAddress", "127.0.0.1:9222")
        option.add_argument("--disable-blink-features")
        # Suppress the Blink automation flag so navigator.webdriver is not set.
        option.add_argument("--disable-blink-features=AutomationControlled")
        # Start Chrome with the remote-debugging port open.
        run_cmd(self.config.START_CHROME_DEBUG_CMD)
        # Give Chrome a moment to open the debugger port before attaching;
        # connecting immediately races against process start-up.
        time.sleep(2)

        self.browser = webdriver.Chrome(options=option)  # browser instance
        # Belt-and-braces: also undefine navigator.webdriver on every new page.
        self.browser.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
            "source": "Object.defineProperty(navigator, 'webdriver', {get: () => undefined})"})

    def start_requests(self, task):
        """Turn one batch-task row ``(id, url, type)`` into a request.

        @param task: 3-tuple pulled from the task table
        @result: yields one feapder.Request carrying the task id and type
        """
        task_id, url, article_type = task  # renamed: id/type shadowed builtins
        yield feapder.Request(url, article_type=article_type, task_id=task_id)

    # ------------------------------------------------------------------
    # internal helpers (shared by parse / parse_news_content)
    # ------------------------------------------------------------------
    def _element_text(self, xpath):
        """Return the textContent of the element located by *xpath*."""
        return self.browser.find_element(By.XPATH, xpath).get_attribute("textContent")

    def _open_in_new_tab(self, url):
        """Open *url* in a new browser tab and switch focus to it."""
        js = self.config.OPEN_NEW_TAB_JS.format(url=url)
        time.sleep(1)
        self.browser.execute_script(js)
        time.sleep(1)
        self.browser.switch_to.window(self.browser.window_handles[-1])

    def _close_current_tab(self):
        """Close the active tab and switch back to the last remaining tab."""
        time.sleep(1)
        self.browser.close()
        self.browser.switch_to.window(self.browser.window_handles[-1])
        # Randomised pause between articles to look less like a bot.
        time.sleep(randint(1, 2))

    def _scrape_article(self, url, article_type, keys):
        """Scrape one article detail page into an item.

        @param url: article detail-page URL
        @param article_type: category forwarded from the request
        @param keys: 5-tuple of item field names, in the order
                     (title, url, date, type, content) — parse() and
                     parse_news_content() store the same data under
                     different field names.
        @result: populated SCUNewsDataItem
        """
        title_key, url_key, date_key, type_key, content_key = keys
        self._open_in_new_tab(url)
        item = SCUNewsDataItem()
        item[title_key] = self.get_date(self._element_text(self.TITLE_XPATH))
        item[url_key] = self.browser.current_url
        item[date_key] = self._element_text(self.DATE_XPATH)
        item[type_key] = article_type
        # Keep the full HTML of the article body, not just its text.
        item[content_key] = self.browser.find_element(
            By.XPATH, self.CONTENT_XPATH).get_attribute("outerHTML")
        self._close_current_tab()
        return item

    def parse(self, request, response):
        """Parse a listing page: scrape every linked article, then mark the task done.

        @param request: the listing-page request (carries task_id / article_type)
        @param response: unused — the live browser fetches the page itself
        @result: yields one item per article, then the task-status update
        """
        self.browser.get(request.url)
        ul = self.browser.find_element(By.XPATH, self.LIST_XPATH)
        for li in ul.find_elements(By.TAG_NAME, "li"):
            page_url = li.find_element(By.TAG_NAME, "a").get_attribute("href")
            yield self._scrape_article(
                page_url, request.article_type,
                ("D_TITLE", "D_URL", "D_SUBDATE", "D_TYPE", "D_CONTENT"))
        # Mark the batch task as successfully processed.
        yield self.update_task_batch(request.task_id, 1)

    def parse_news_content(self, request, response):
        """Scrape a single article page addressed directly by the request.

        @param request: request whose url is the article detail page
        @param response: unused — the live browser fetches the page itself
        @result: yields one populated item
        """
        yield self._scrape_article(
            request.url, request.article_type,
            ("title", "origin_url", "submission_date", "type", "content_html"))

    def get_date(self, text):
        """Remove the leading "发布日期：" / "日期：" label from a scraped string.

        The previous implementation used ``str.strip('日期：')``, which strips
        any of those individual *characters* from BOTH ends — and since this
        method is also applied to titles, a title ending in 日/期/： would be
        corrupted.  Exact-prefix removal fixes that.

        @param text: raw text scraped from the page
        @result: cleaned string with the label and surrounding whitespace removed
        """
        text = text.strip()
        for prefix in ("发布日期：", "日期："):
            if text.startswith(prefix):
                return text[len(prefix):].strip()
        return text

    def exception_request(self, request, response):
        """Handle a request that raised during download or parsing.

        @param request:
        @param response:
        ---------
        @result: request / callback / None (return value must be iterable)
        """

        pass

    def failed_request(self, request, response):
        """Handle a request that exceeded the maximum retry count.

        @param request:
        ---------
        @result: request / item / callback / None (return value must be iterable)
        """
        yield request
        yield self.update_task_batch(request.task_id, -1)  # mark the task as failed
