# encoding: utf-8
# @File  : zhihu.py
# @Author: Ac
# @Date  : 2024/12/03/23:31
# @Note  : 搜集知乎文章
import re

import scrapy
import time
from scrapy.loader import ItemLoader
from ArticleSpider.utils import zhihu_loger
from ArticleSpider.settings import ZHIHU_USER_NAME, ZHIHU_USER_PASSWORD
from urllib import parse
from ArticleSpider.items import ZhihuQuestionItem, ZhihuAnswerItem


class ZhihuSpider(scrapy.Spider):
    """
    Zhihu spider: logs in with the zhihu_loger helper, walks question links
    from the home page, and extracts each question into a ZhihuQuestionItem.
    """
    name = 'zhihu'
    allowed_domains = ['www.zhihu.com']
    start_urls = ['https://www.zhihu.com/']

    def start_requests(self):
        """
        Log in and seed the crawl with ``start_urls``, attaching the login
        cookies and a browser user-agent to every seed request.

        NOTE(review): the original declared this as a ``@classmethod`` while
        still taking ``self``; Scrapy calls ``start_requests`` on the spider
        instance, so the decorator was wrong and has been removed.
        """
        zl = zhihu_loger.Login(ZHIHU_USER_NAME, ZHIHU_USER_PASSWORD, 7)
        cookies = zl.login()
        self.logger.debug("login cookies: %s", cookies)

        headers = {
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0'
        }
        # Stash headers/cookies on the instance so parse() can reuse them
        # when scheduling detail requests.
        self.headers = headers
        self.cookies = cookies
        for url in self.start_urls:
            yield scrapy.Request(url, cookies=cookies, headers=headers, dont_filter=True)

    def parse(self, response):
        """
        Extract question links from a listing page and schedule a detail
        request for each one.

        :param response: listing-page response
        """
        # Example anchor on the page:
        # <a target="_blank" href="//www.zhihu.com/question/552294083/answer/2967843106">...</a>
        hrefs = response.css("a::attr(href)").extract()
        # Resolve relative / protocol-relative hrefs against the page URL,
        # then keep only absolute https links.
        all_urls = [parse.urljoin(response.url, href) for href in hrefs]
        all_urls = [u for u in all_urls if u.startswith('https')]

        # BUG FIX: the original iterated the raw hrefs (discarding the
        # urljoin/filter work above) and yielded a request even when the
        # regex did not match, producing requests with an empty URL.
        for url in all_urls:
            match_re = re.match(r"(.*zhihu\.com/question/(\d+))(/|$).*", url)
            if not match_re:
                # Not a question URL — skip instead of yielding an empty request.
                continue
            request_url = match_re.group(1)
            question_id = match_re.group(2)
            # BUG FIX: the original condition was inverted — it prepended
            # "https:" only when the URL *already* started with "https".
            # The prefix is needed for protocol-relative "//www.zhihu.com/..."
            # URLs, i.e. exactly when the scheme is missing.
            if not request_url.startswith("https"):
                request_url = "https:" + request_url

            yield scrapy.Request(url=request_url,
                                 callback=self.parse_detail,
                                 headers=self.headers,
                                 meta={
                                     "question_id": question_id
                                 })

    def parse_detail(self, response):
        """
        Parse a question detail page into a ZhihuQuestionItem.

        :param response: question-page response; ``response.meta`` carries
            the "question_id" set by :meth:`parse` (always a digit string,
            so the int() conversion below is safe)
        :return: yields one ZhihuQuestionItem
        """
        loader = ItemLoader(item=ZhihuQuestionItem(), response=response)
        loader.add_css("title", "h1.QuestionHeader-title::text")
        loader.add_css("content", "div.QuestionRichText.QuestionRichText--expandable")
        loader.add_value("url", response.url)
        loader.add_value("zhihu_id", int(response.meta.get("question_id")))

        # "View all N answers" shortcut link under the question header.
        loader.add_css("answer_num", "div.ListShortcut > div > div:nth-child(1) > a::text")
        loader.add_css("comments_num", "div.QuestionHeader-Comment>button::text")
        # Sidebar counter: followers of the question.
        loader.add_css("watch_user_num",
                       "div.QuestionHeader-side > div > div > div > button > div > strong::text")
        # Sidebar counter: question view count.
        loader.add_css("click_num",
                       "div.QuestionHeader-side > div > div > div > div > div > strong::text")
        loader.add_css("topics", ".TopicLink>div::text")
        # NOTE(review): crawl_time is loaded as None — presumably the item's
        # input processor fills in the timestamp; confirm, otherwise pass a
        # real timestamp here.
        loader.add_value("crawl_time", None)

        question_item = loader.load_item()
        # BUG FIX: the original only printed the item and never yielded it,
        # so it never reached the item pipelines.
        yield question_item
