import scrapy
import pickle
import time
import random
import re
import json
import requests
from ArticleSpider.utils import common
from ArticleSpider.items import JobBoleArticleItem
from selenium import webdriver
from urllib import parse
from ArticleSpider.items import ArticlespiderItem
from scrapy.loader import ItemLoader
from ArticleSpider.items import ArticleItemLoader


class CnbolgsSpider(scrapy.Spider):
    """Spider for https://news.cnblogs.com/.

    Flow: list page -> per-article detail page -> per-article AJAX request
    for the digg/view/comment counters.  Login cookies are loaded from a
    pickle file produced by a one-off manual Selenium login (see the note
    in ``start_requests``).
    """

    name = 'cnbolgs'
    allowed_domains = ['news.cnblogs.com']
    start_urls = ['https://news.cnblogs.com/']
    front_img_src = ""
    # Pickled cookie list captured by a prior manual Selenium login.
    COOKIE_FILE = "E:/zhuomian/project/python/ArticleSpider/cookies/boke.cookie"

    def parse(self, response):
        """Yield a detail-page request per article on a list page, then follow pagination.

        :param response: list-page response from news.cnblogs.com
        """
        for entry in response.xpath('//*[@id="news_list"]/div/div[2]'):
            front_img_src = entry.xpath("./div[1]/a/img/@src").extract_first()
            post_url = entry.css('h2 a::attr(href)').extract_first()
            # Guard: urljoin(base, None) raises TypeError on a malformed entry.
            if post_url:
                yield scrapy.Request(
                    url=parse.urljoin(response.url, post_url),
                    meta={"front_image_url": front_img_src},
                    callback=self.parse_detail,
                )
        next_src = response.xpath("//a[contains(text(),'Next >')]/@href").extract_first()
        # Bug fix: on the last page there is no "Next >" link, so next_src is
        # None and the original unconditionally called urljoin(..., None),
        # which raises TypeError.
        if next_src:
            yield scrapy.Request(url=parse.urljoin(response.url, next_src),
                                 callback=self.parse)

    def cookie_data(self):
        """Load the pickled Selenium cookies and return them as a {name: value} dict."""
        # Bug fix: the original used pickle.load(open(...)) and leaked the
        # file handle; "with" closes it deterministically.
        with open(self.COOKIE_FILE, "rb") as fh:
            cookies = pickle.load(fh)
        return {cookie["name"]: cookie["value"] for cookie in cookies}

    def start_requests(self):
        """Kick off the crawl with the saved login cookies attached.

        To (re)generate the cookie file: drive Chrome via Selenium to the
        cnblogs sign-in page, log in manually (give yourself ~30 s), then
        ``pickle.dump(driver.get_cookies(), open(COOKIE_FILE, "wb"))``.
        """
        return [scrapy.Request(url=self.start_urls[0], dont_filter=False,
                               cookies=self.cookie_data())]

    def parse_detail(self, response):
        """Parse one article page, then request its AJAX counter endpoint.

        The partially-filled ItemLoader is carried to ``parse_nums`` through
        ``Request.meta`` so the counters can be merged in before loading.
        """
        # Raw string fixes the invalid "\d" escape (SyntaxWarning on 3.12+).
        match_re = re.match(r".*?(\d+)", response.url)
        if not match_re:
            return  # URL carries no numeric article id; nothing to scrape
        get_id = match_re.group(1)
        # NOTE: the original slept 1-3 s here via time.sleep(), which blocks
        # Scrapy's (Twisted) event loop and stalls every concurrent request.
        # Throttle with DOWNLOAD_DELAY / RANDOMIZE_DOWNLOAD_DELAY in
        # settings.py instead.

        item_loader = ArticleItemLoader(item=JobBoleArticleItem(), response=response)
        item_loader.add_xpath("title", '//div[@id="news_title"]/a/text()')
        item_loader.add_xpath("create_date", '//div[@id="news_info"]/span[2]/text()')
        item_loader.add_css("content", "#news_content")
        item_loader.add_css("tags", "#news_more_info>.news_tags a::text")
        item_loader.add_value("url", response.url)

        front_image_url = response.meta.get("front_image_url", "")
        # Bug fix: the original tested `"http" in url`, which also matches
        # "http" anywhere in the path.  Scheme-relative URLs ("//img...")
        # need the "https:" prefix, so test the start of the string.
        if front_image_url and not front_image_url.startswith("http"):
            front_image_url = "https:" + front_image_url
        item_loader.add_value("front_image_url", front_image_url)

        yield scrapy.Request(
            url=parse.urljoin(
                response.url,
                "/NewsAjax/GetAjaxNewsInfo?contentId={}".format(get_id)),
            meta={
                "article_item": item_loader,
                "contentId": "{}".format(get_id),
                "url": response.url,
            },
            callback=self.parse_nums,
        )

    def parse_nums(self, response):
        """Merge the AJAX counters into the loader carried in meta and emit the item.

        :param response: JSON response from /NewsAjax/GetAjaxNewsInfo
        """
        j_data = json.loads(response.text)
        item_loader = response.meta.get("article_item", "")
        item_loader.add_value("praise_nums", j_data["DiggCount"])
        item_loader.add_value("fav_nums", j_data["TotalView"])
        item_loader.add_value("comment_nums", j_data["CommentCount"])
        item_loader.add_value("url_object_id", common.get_md5(response.meta.get("url", "")))
        yield item_loader.load_item()
