import scrapy  # async I/O framework (event-loop based) — no multithreading, no message queue
from scrapy import Selector, Request
from scrapy.loader import ItemLoader
import os
import ssl
from urllib import parse
import requests
import re
import json

from ArticleSpider.items import CnBlogsArticleItem, ArticleItemLoader
from ArticleSpider.utils import common

# Work around SSL certificate verification failures on HTTPS requests
ssl._create_default_https_context = ssl._create_unverified_context


class CnblogsSpider(scrapy.Spider):
    """Spider for news.cnblogs.com.

    Crawls the news list page, follows each article to its detail page,
    builds a ``CnBlogsArticleItem`` via an item loader, and finally
    enriches every item with digg/view/comment counts fetched from the
    site's AJAX endpoint before yielding it to the pipelines.
    """

    name = 'cnblogs'
    allowed_domains = ['news.cnblogs.com']
    start_urls = ['http://news.cnblogs.com/']

    # Cookies obtained from the manual browser login must be carried on
    # every subsequent request, so cookie handling stays enabled.
    custom_settings = {
        "COOKIES_ENABLED": True
    }

    def start_requests(self):
        """Entry point: browser-assisted manual login, then seed the crawl.

        Opens an undetected-chromedriver Chrome window on the sign-in
        page, waits for the operator to log in and press Enter, copies
        the browser session cookies, and attaches them (plus a realistic
        User-Agent) to the initial requests.

        :return: generator of :class:`scrapy.Request` for ``start_urls``
        """
        # Third-party driver kept function-local so the module still
        # imports when the package is absent.
        import undetected_chromedriver.v2 as uc

        # NOTE(review): relative path — assumes the process CWD is the
        # project root; confirm against how the spider is launched.
        driver = uc.Chrome(executable_path="ArticleSpider/drivers/chromedriver")
        driver.get("https://account.cnblogs.com/signin")
        input("回车继续")  # operator logs in manually, then presses Enter

        # Selenium returns a list of cookie dicts; Scrapy wants name -> value.
        cookie_dict = {cookie['name']: cookie['value']
                       for cookie in driver.get_cookies()}

        headers = {
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0'
        }

        for url in self.start_urls:
            # Scrapy does not automatically reuse these cookies/headers
            # on later requests, so attach them explicitly here;
            # dont_filter bypasses the dupe filter for the seed URLs.
            yield scrapy.Request(url,
                                 headers=headers,
                                 cookies=cookie_dict,
                                 dont_filter=True)

    def parse(self, response):
        """Parse a news-list page and yield one Request per article.

        The cover-image URL is resolved here and forwarded to the detail
        callback via ``meta`` because it only appears on the list page.
        """
        # Each article lives in a div.news_block under div#news_list.
        for block in response.css('div#news_list div.news_block'):
            # Cover image; the site emits protocol-relative URLs ("//...").
            img_url = block.css("div.entry_summary a img::attr(src)").extract_first("")
            if img_url.startswith("//"):
                img_url = "https:" + img_url

            # Relative link to the article detail page, resolved against
            # the current page URL.
            post_url = block.css("h2.news_entry a::attr(href)").extract_first("")
            url = parse.urljoin(response.url, post_url)

            yield Request(url=url,
                          meta={"front_img_url": img_url},
                          callback=self.parse_detail)

    def parse_detail(self, response):
        """Parse an article page into a ``CnBlogsArticleItem``.

        Loads the scalar fields with an item loader, then issues an
        asynchronous follow-up request to the counters AJAX endpoint,
        passing the partially-filled item along in ``meta``.
        """
        # The numeric article id is the first digit run in the URL,
        # e.g. https://news.cnblogs.com/n/123456/
        id_match = re.search(r"(\d+)", response.url)
        if not id_match:
            self.logger.warning("no article id found in url: %s", response.url)
            return
        post_id = id_match.group(1)

        # ArticleItemLoader applies the item's input/output processors.
        item_loader = ArticleItemLoader(item=CnBlogsArticleItem(), response=response)
        item_loader.add_css("title", 'div#news_title a::text')
        item_loader.add_css("content", 'div#news_content')
        item_loader.add_css("tags", '.news_tags a::text')
        item_loader.add_css("create_date", 'div#news_info .time::text')
        item_loader.add_value("url", response.url)

        # The image pipeline expects a *list* of URLs; passing a bare
        # empty string raises "ValueError: Missing scheme in request url".
        front_img_url = response.meta.get("front_img_url", "")
        if front_img_url:
            item_loader.add_value("front_image_url", [front_img_url])
        else:
            item_loader.add_value("front_image_url", [])

        # load_item() materializes the CnBlogsArticleItem.
        article_item = item_loader.load_item()
        self.logger.debug("parsed article %s: %r", response.url, article_item)

        yield Request(url=parse.urljoin(response.url,
                                        '/NewsAjax/GetAjaxNewsInfo?contentId={}'.format(post_id)),
                      callback=self.parse_news_ajax,
                      meta={"article_item": article_item})

    def parse_news_ajax(self, response):
        """Merge digg/view/comment counts into the item and emit it.

        Reads the JSON body of the counters endpoint, fills the numeric
        fields on the item carried in ``meta``, derives the URL-based
        primary key, and yields the finished item to the pipelines.
        """
        article_item = response.meta.get("article_item", CnBlogsArticleItem())

        counts = json.loads(response.text)
        article_item["praise_num"] = counts["DiggCount"]
        article_item["fav_num"] = counts["TotalView"]
        article_item["comment_num"] = counts["CommentCount"]

        # Stable primary key derived from the article URL.
        article_item["url_object_id"] = common.get_md5(article_item["url"])

        # Yielding an item (rather than a Request) hands it to the
        # configured pipelines for persistence.
        yield article_item
