import json
import os
import re
import time
from datetime import datetime, timezone
from urllib import parse

import scrapy
from scrapy import Request
from mouse import move, click
from selenium import webdriver

from ArticleSpider.utils import common
from ArticleSpider.items import CnblogsArticleItem, ArticlespiderItemLoader

class CnblogsSpider(scrapy.Spider):
    """Crawl news articles from news.cnblogs.com.

    Login is performed through a Chrome instance exposed via the remote
    debugging protocol (see ``start_requests``); the harvested cookies
    are handed to Scrapy, which then crawls the news list and each
    article's detail page plus its AJAX stats endpoint.
    """

    name = "cnblogs"
    # BUGFIX: the original list was missing a comma after
    # "img2024.cnblogs.com"; Python implicitly concatenated the two
    # adjacent string literals into the bogus domain
    # "img2024.cnblogs.comimg2022.cnblogs.com", silently removing both
    # real domains from the allow-list.
    allowed_domains = [
        "news.cnblogs.com",
        "img2023.cnblogs.com", "img2024.cnblogs.com",
        "img2022.cnblogs.com", "img2020.cnblogs.com", "img2021.cnblogs.com",
        "img2018.cnblogs.com", "img2019.cnblogs.com", "images2017.cnblogs.com",
        "images2016.cnblogs.com", "images2015.cnblogs.com", "images2014.cnblogs.com",
        "images0.cnblogs.com",
    ]
    start_urls = ["https://news.cnblogs.com"]

    def start_requests(self):
        """Log in through a pre-started Chrome and seed the crawl.

        Manual one-off setup:

        1. Start Chrome as a service (all other Chrome instances closed)::

               chrome.exe --remote-debugging-port=9222

           Verify it is up at http://localhost:9222/json
        2. This method attaches Selenium to that browser, performs the
           sign-in, collects the session cookies and returns the first
           ``scrapy.Request`` carrying them.

        Returns:
            list[scrapy.Request]: a single seeded request on success,
            or an empty list when login produced no cookies.
        """
        from selenium.webdriver.chrome.service import Service
        from selenium.webdriver.chrome.options import Options
        from selenium.webdriver.common.keys import Keys
        from selenium.webdriver.common.by import By

        chrome_options = Options()
        # chrome_options.add_argument("--headless")  # enable for headless runs
        chrome_options.add_argument("--disable-extensions")
        # Attach to the already-running Chrome instead of spawning a new one.
        chrome_options.add_experimental_option("debuggerAddress", "127.0.0.1:9222")
        chromedriver_path = os.path.abspath("chromedriver/chromedriver.exe")
        service = Service(chromedriver_path)
        browser = webdriver.Chrome(service=service, options=chrome_options)
        browser.get("https://account.cnblogs.com/signin")

        try:
            browser.maximize_window()
        except Exception:
            # Window control can fail when attached over the debugger; ignore.
            pass

        login_success = False
        cookies = []  # BUGFIX: was unbound (NameError at the `if cookies:` check) when login failed
        try:
            # SECURITY: credentials should come from the environment, not
            # source control; the literals remain only as a fallback so
            # existing setups keep working.
            username = os.environ.get("CNBLOGS_USERNAME", "920919805@qq.com")
            password = os.environ.get("CNBLOGS_PASSWORD", "opq8225818.123")
            username_input = browser.find_element(By.ID, "mat-input-0")
            password_input = browser.find_element(By.ID, "mat-input-1")

            # Select-all before typing so any stale text is overwritten.
            username_input.send_keys(Keys.CONTROL + "a")
            username_input.send_keys(username)

            password_input.send_keys(Keys.CONTROL + "a")
            password_input.send_keys(password)

            # Submit the sign-in form.
            login_button = browser.find_element(
                By.CSS_SELECTOR,
                "body > app-root > app-sign-in-layout > div > div > app-sign-in"
                " > app-content-container > div > div > div > form > div > button",
            )
            login_button.click()
            # Dismiss the slider/captcha widget when it appears.
            check_button = browser.find_element(By.ID, "SM_BTN_WRAPPER_1")
            check_button.click()
            login_success = True
        except Exception:
            # BUGFIX: was a bare `except:` whose fallback `find_element`
            # itself raises when the element is missing, so the original
            # `if is_login:` branch was dead and the error propagated.
            # The "loginOut" element only exists for authenticated users,
            # so finding it means we were already logged in.
            try:
                browser.find_element(By.ID, "loginOut")
                self.logger.info("Already logged in")
                login_success = True
            except Exception:
                self.logger.error(
                    "Login failed: sign-in form not usable and no existing session found"
                )
        finally:
            if login_success:
                cookies = browser.get_cookies()
            # BUGFIX: always release the driver, not only on success.
            browser.quit()

        if cookies:
            scrapy_cookies = {cookie['name']: cookie['value'] for cookie in cookies}
            return [
                scrapy.Request(
                    url=self.start_urls[0],
                    dont_filter=True,
                    cookies=scrapy_cookies
                )
            ]
        # BUGFIX: falling off the end returned None, which makes Scrapy
        # raise TypeError when iterating start_requests(); an empty list
        # shuts the spider down cleanly instead.
        return []

    def closed(self, reason):
        """Spider-closed hook: log the total runtime in seconds.

        Args:
            reason: shutdown reason string supplied by Scrapy (unused).
        """
        start_time = self.crawler.stats.get_value('start_time')
        if start_time is None:
            # Stats may be empty if the spider never started properly.
            return

        # BUGFIX: the original subtracted Scrapy's UTC start_time from the
        # *local* datetime.now() after stripping tzinfo, skewing the
        # reported runtime by the local UTC offset. Compare both as
        # timezone-aware UTC instead.
        end_time = datetime.now(timezone.utc)
        if start_time.tzinfo is None:
            # Older Scrapy versions store a naive UTC datetime; make it aware.
            start_time = start_time.replace(tzinfo=timezone.utc)

        run_time = (end_time - start_time).total_seconds()
        self.logger.info(f"Spider runtime: {run_time:.2f} seconds")

    def parse(self, response):
        """Parse the news list page and queue one detail request per entry.

        The cover-image URL is forwarded to ``parse_detail`` via ``meta``
        because it only appears on the list page, not on the article page.
        """
        post_nodes = response.css("#news_list .news_block")
        for post_node in post_nodes:
            image_url = post_node.css(".entry_summary a img::attr(src)").extract_first("")
            post_url = post_node.css("h2 a::attr(href)").extract_first("")
            yield Request(
                url=parse.urljoin(response.url, post_url),
                meta={"front_image_url": image_url},
                callback=self.parse_detail,
            )
        # Pagination (currently disabled): follow the "Next >" link.
        # next_url = response.xpath("//a[contains(text(),'Next >')]/@href").extract_first("")
        # if next_url:
        #     yield Request(url=parse.urljoin(response.url, next_url), callback=self.parse)

    def parse_detail(self, response):
        """Parse an article detail page and chain to its AJAX stats endpoint.

        Extracts the numeric post id from the URL, fills an item loader
        with the on-page fields, then requests the GetAjaxNewsInfo JSON
        (which carries the authoritative digg/comment/view counts) with
        the loader passed along in ``meta``.
        """
        # BUGFIX: raw string — "\d" in a plain literal is an invalid escape
        # sequence (SyntaxWarning on modern Python).
        match_re = re.match(r".*?(\d+)", response.url)
        if match_re:
            post_id = match_re.group(1)
            # Populate the item through an item loader so the item's
            # input/output processors apply.
            item_loader = ArticlespiderItemLoader(item=CnblogsArticleItem(), response=response)
            item_loader.add_value("url", response.url)
            item_loader.add_value("url_object_id", common.get_md5(response.url))
            item_loader.add_css("title", "#news_title a::text")
            item_loader.add_css("praise_nums", "#digg_count::text")
            item_loader.add_css("comment_nums", "#cmt_count::text")
            item_loader.add_css("fav_nums", ".fav::text")
            item_loader.add_css("tags", ".news_tags a::text")
            # item_loader.add_css("content", "#news_content")
            item_loader.add_css("create_date", "#news_info .time::text")
            front_image_url = response.meta.get("front_image_url", "")  # article cover image
            # Normalize the cover URL: protocol-relative ("//...") gets an
            # explicit https scheme; other relative paths resolve against
            # the page URL.
            if front_image_url.startswith("//"):
                front_image_url = "https:" + front_image_url
            elif front_image_url not in ["", None]:
                front_image_url = parse.urljoin(response.url, front_image_url)
            item_loader.add_value("front_image_url", [front_image_url])

            yield Request(
                url=parse.urljoin(
                    response.url,
                    "/NewsAjax/GetAjaxNewsInfo?contentId={}".format(post_id),
                ),
                meta={"article_item": item_loader},
                callback=self.parse_nums,
            )

    def parse_nums(self, response):
        """Merge the AJAX stats JSON into the loader and emit the item.

        Expects the GetAjaxNewsInfo payload with DiggCount / CommentCount /
        TotalView keys; a missing key raises KeyError so bad responses are
        surfaced rather than silently zeroed.
        """
        j_data = json.loads(response.text)
        item_loader = response.meta.get("article_item", "")
        item_loader.add_value("praise_nums", j_data["DiggCount"])
        item_loader.add_value("comment_nums", j_data["CommentCount"])
        item_loader.add_value("fav_nums", j_data["TotalView"])
        article_item = item_loader.load_item()
        yield article_item