import time
from urllib import parse
import re
import json

import redis
import scrapy
from fake_useragent import UserAgent
from CnblogsScrapy.items import CnblogsItemLoader

from CnblogsScrapy.items import CnblogsNewsItem
from CnblogsScrapy.utils import common
from scrapy.utils.project import get_project_settings


class CnblogsSpider(scrapy.Spider):
    """Crawl news.cnblogs.com: list pages -> detail pages -> ajax counters.

    Login cookies are drawn at random from the Redis set
    ``cnblogs:cookies`` (populated by a separate login process) and
    attached to the start requests.
    """
    name = "cnblogs"
    # allowed_domains = ["news.cnblogs.com"]  # would filter out off-domain URLs
    start_urls = ["https://news.cnblogs.com"]
    custom_settings = {
        'COOKIES_ENABLED': True,
        # Throttle at the scheduler level. The previous implementation called
        # time.sleep(10) inside a callback, which blocks Scrapy's
        # single-threaded reactor and stalls every in-flight request.
        'DOWNLOAD_DELAY': 10,
    }
    # NOTE: the random User-Agent is picked once at class-definition time,
    # so every request in this process shares the same UA string.
    headers = {
        'User-Agent': UserAgent().random,
    }

    def __init__(self):
        super().__init__()
        self.settings = get_project_settings()
        # decode_responses=True makes redis return str instead of bytes, so
        # the stored cookie payload can be fed straight to json.loads().
        self.redis_cli = redis.Redis(host=self.settings['REDIS_HOST'],
                                     port=self.settings['REDIS_PORT'],
                                     decode_responses=True)

    def start_requests(self):
        """Login entry point: attach a random stored cookie to each start URL.

        :return: generator of scrapy.Request for every URL in start_urls
        """
        for url in self.start_urls:
            # Randomly pick one stored cookie set for this session.
            cookies_str = self.redis_cli.srandmember("cnblogs:cookies")
            if not cookies_str:
                # Empty cookie pool: srandmember returns None and
                # json.loads(None) would raise TypeError, killing the spider.
                # Degrade to an anonymous request instead of crashing.
                self.logger.warning(
                    "no cookie in redis set 'cnblogs:cookies'; "
                    "requesting %s without login cookies", url)
                yield scrapy.Request(url, headers=self.headers, dont_filter=True)
                continue
            cookies_dict = json.loads(cookies_str)
            yield scrapy.Request(url, cookies=cookies_dict, headers=self.headers, dont_filter=True)

    def parse(self, response):
        """Crawling strategy for a list page (no detail parsing here,
        because every page type needs its own parser).

        1. Extract each news-detail URL and schedule it for download,
           handled by :meth:`parse_news_page`.
        2. Extract the next-page URL and schedule it back through ``parse``.

        :param response: downloaded news-list page
        :return: generator of scrapy.Request
        """
        news_list = response.xpath('//*[@id="news_list"]/*[@class="news_block"]')
        for news in news_list:
            # Usually an absolute path; defaults to "" when no <img> exists.
            front_image_url = news.xpath('.//img/@src').get("")
            if front_image_url and front_image_url.startswith('//'):
                # Protocol-relative URL -> pin to https.
                front_image_url = "https:" + front_image_url
            news_url = news.xpath('.//h2/a/@href').get()
            yield scrapy.Request(url=parse.urljoin(response.url, news_url),
                                 meta={"front_image_url": front_image_url},
                                 callback=self.parse_news_page)
        # Next page re-enters this same method (Scrapy's default callback).
        next_url = response.xpath('.//a[contains(text(), "Next >")]/@href').get()
        if next_url:
            yield scrapy.Request(url=parse.urljoin(response.url, next_url))

    def parse_news_page(self, response):
        """Parse one news detail page into a CnblogsNewsItem, then chain a
        request for its ajax-loaded counters before the item is yielded.

        :param response: downloaded news detail page
        :return: generator yielding the follow-up ajax scrapy.Request
        """
        # News id is the first run of digits in the URL,
        # e.g. https://news.cnblogs.com/n/123456/ -> "123456".
        # (Equivalent to the old re.match(r'.*?(\d+)', ...) but explicit.)
        match_num = re.search(r'(\d+)', response.url)
        if match_num:
            news_item = CnblogsNewsItem()

            item_loader = CnblogsItemLoader(item=news_item, response=response)
            item_loader.add_value("url", response.url)
            item_loader.add_value("id", match_num.group(1))
            item_loader.add_xpath("title", '//*[@id="news_title"]/a/text()')
            item_loader.add_xpath("create_time", '//*[@id="news_info"]/span[@class="time"]/text()')
            item_loader.add_xpath("content", '//*[@id="news_content"]')
            item_loader.add_xpath("tags", '//div[@class="news_tags"]/a/text()')
            if response.meta.get("front_image_url"):
                item_loader.add_value("front_image_url", response.meta.get("front_image_url"))

            news_item = item_loader.load_item()

            # Comment/digg/view counters are loaded client-side; fetch them
            # from the ajax endpoint and finish the item in parse_rest_info.
            yield scrapy.Request(url=parse.urljoin(response.url, f"/NewsAjax/GetAjaxNewsInfo?contentId={match_num.group(1)}"),
                                 meta={"news_item": news_item},
                                 callback=self.parse_rest_info)

    def parse_rest_info(self, response):
        """Attach the ajax-loaded stats (CommentCount, DiggCount, TotalView)
        to the item passed via meta, compute its md5 id, and yield it.

        :param response: JSON response from /NewsAjax/GetAjaxNewsInfo
        :return: generator yielding the completed news item
        """
        rest_info = json.loads(response.text)
        news_item = response.meta.get("news_item")
        if news_item:
            news_item['commentCount'] = rest_info['CommentCount']
            news_item['diggCount'] = rest_info['DiggCount']
            news_item['totalView'] = rest_info['TotalView']

            # Stable primary key derived from the canonical URL.
            news_item['md5_id'] = common.get_md5(news_item['url'])
            yield news_item
