import re
from datetime import datetime, timedelta

import scrapy
from scrapy import Selector
from scrapy.http import Request
from selenium import webdriver
from webdriver_manager.microsoft import EdgeChromiumDriverManager

from sina.items import SinaItem


class SinaSpiderSpider(scrapy.Spider):
    """Spider for domestic-news headlines on https://news.sina.com.cn/china/.

    The feed page is rendered client-side, so a Selenium-driven Edge browser
    loads and paginates it (up to 5 pages); each headline then becomes a
    normal Scrapy request whose response is parsed for the article body.
    """

    name = "sina_spider"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.start_urls = ['https://news.sina.com.cn/china/']
        # Resolve (downloading if necessary) a msedgedriver binary matching
        # the locally installed Edge browser.
        self.driver_path = EdgeChromiumDriverManager().install()
        self.capabilities = {
            "browserName": "MicrosoftEdge",
            "version": "",
            "platform": "WINDOWS",
            "ms:edgeOptions": {
                "extensions": [],
                # Disable image loading so pages render faster.
                "args": ["--blink-settings=imagesEnabled=false"]
            }
        }

    def start_requests(self):
        """Issue one plain request per start URL; rendering happens in parse."""
        for url in self.start_urls:
            yield Request(url=url, callback=self.parse)

    def parse(self, response, **kwargs):
        """Render the feed with Selenium and yield a detail request per headline.

        Walks at most 5 pages of the feed; stops early when the "next page"
        control is missing. The browser is always shut down, even on errors.
        """
        driver = webdriver.Edge(executable_path=self.driver_path, capabilities=self.capabilities)
        driver.set_page_load_timeout(30)
        try:
            driver.get(response.url)

            for _page in range(5):
                # The pager is injected lazily; keep scrolling until it has text.
                while not driver.find_element_by_xpath("//div[@class='feed-card-page']").text:
                    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

                titles = driver.find_elements_by_xpath("//h2[@class='undefined']/a[@target='_blank']")
                times = driver.find_elements_by_xpath(
                    "//h2[@class='undefined']/../div[@class='feed-card-a "
                    "feed-card-clearfix']/div[@class='feed-card-time']")
                # zip pairs each headline with its timestamp and tolerates a
                # length mismatch (the old index loop would raise IndexError).
                for title_el, time_el in zip(titles, times):
                    href = title_el.get_attribute('href')
                    item = SinaItem()
                    item['type'] = 'news'
                    item['title'] = title_el.text
                    item['times'] = self.convert_time(time_el.text)
                    yield Request(url=response.urljoin(href), meta={'name': item},
                                  callback=self.parse_namedetail)

                try:
                    driver.find_element_by_xpath(
                        "//div[@class='feed-card-page']/span[@class='pagebox_next']/a").click()
                except Exception:
                    # No clickable "next page" link: last page reached.
                    break
        finally:
            driver.quit()  # release the browser process in every case

    def parse_namedetail(self, response):
        """Fill the partially built item with the article body and emit it."""
        selector = Selector(response)
        paragraphs = selector.xpath("//div[@class='article']/p/text()").extract()
        item = response.meta['name']
        # Strip per-paragraph whitespace, then concatenate into one string.
        item['desc'] = ''.join(p.strip() for p in paragraphs)
        yield item

    @staticmethod
    def convert_time(time_text):
        """Convert Sina's feed timestamp text into a :class:`datetime`.

        Accepts three shapes:
        * "N分钟前"        — N minutes ago (date taken from the shifted moment,
                             so crossing midnight is handled correctly);
        * "今天 HH:MM"     — today at the given time;
        * "[YYYY年]M月D日 HH:MM" — explicit date, year defaulting to this year.
        Seconds are always dropped.
        """
        today = datetime.now()
        each_time = time_text.replace('今天', str(today.month) + '月' + str(today.day) + '日')
        if '分钟前' in each_time:
            minutes_ago = int(each_time.split('分钟前')[0])
            t = datetime.now() - timedelta(minutes=minutes_ago)
            return datetime(year=t.year, month=t.month, day=t.day, hour=t.hour, minute=t.minute)
        if '年' not in each_time:
            each_time = str(today.year) + '年' + each_time
        parts = re.split('[年月日:]', each_time)
        return datetime(year=int(parts[0]), month=int(parts[1]), day=int(parts[2]),
                        hour=int(parts[3]), minute=int(parts[4]))
