# -*- coding: utf-8 -*-
import datetime
import json
from urllib import parse

import scrapy
import re
import redis
from scrapy.http import Request
from DataSpider.items import WuYiCtoItem, ArticalItemLoader
from utils.common import get_md5
from selenium import webdriver
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals

# Module-level Redis client (default localhost:6379); decode_responses=True
# makes GET return str instead of bytes.
# NOTE(review): only referenced from commented-out pagination code in
# WuYiCTOSpider.parse_more_detail — confirm it is still needed.
redis_cli = redis.StrictRedis(decode_responses=True)


class WuYiCTOSpider(scrapy.Spider):
    """Spider for news articles on news.51cto.com.

    Crawl flow:
      1. ``parse`` extracts article links (and cover images) from the home
         page list and schedules each detail page, plus one request to the
         AJAX "recommended articles" endpoint.
      2. ``parse_more_detail`` unwraps that endpoint's JSONP payload and
         schedules the articles it lists.
      3. ``parse_detail`` loads one article page into a ``WuYiCtoItem``.
    """

    name = '51cto'
    # allowed_domains = ['news.51cto.com']
    start_urls = ['https://news.51cto.com/']

    # Page requested from the recommend-article-list endpoint.
    # NOTE(review): hard-coded to 340 in the original; the redis-backed
    # incremental pagination was disabled — confirm intended behavior.
    RECOMMEND_PAGE = 340

    def parse(self, response):
        """Parse the article list page.

        Yields one ``Request`` per (article url, cover image) pair, handled
        by ``parse_detail``, then one ``Request`` for the AJAX endpoint,
        handled by ``parse_more_detail``.
        """
        post_nodes = response.css(".home-left-list .rinfo a:not(.tag)::attr(href)").extract()
        img_urls = response.css(".home-left-list div a img::attr(src)").extract()

        # zip pairs each link with its cover image and stops at the shorter
        # list, avoiding the IndexError the original index loop could raise
        # when post_nodes was shorter than img_urls.
        for post_url, img_url in zip(post_nodes, img_urls):
            yield Request(url=parse.urljoin(response.url, post_url),
                          meta={"front_image_url": img_url},
                          callback=self.parse_detail)

        # Schedule the AJAX endpoint that returns more recommended articles.
        next_url = ('https://www.51cto.com/php/get_channel_recommend_art_list.php'
                    '?callback=nextContent&page={}&type_id=558&type=recommend'
                    .format(self.RECOMMEND_PAGE))
        yield Request(url=parse.urljoin(response.url, next_url),
                      callback=self.parse_more_detail)

    def parse_more_detail(self, response):
        """Parse the JSONP payload of the recommend-article-list endpoint.

        The body looks like ``nextContent([[{...}, ...], flag])`` with
        backslash-escaped unicode: strip the escaping and the callback
        wrapper, decode the JSON, and schedule each listed article for
        ``parse_detail``.
        """
        raw = response.text.encode("utf-8").decode("unicode-escape")
        # Remove the leftover backslashes, then drop the "nextContent("
        # prefix (12 chars) and the trailing ")".
        payload = re.sub(r"\\", "", raw)[12:-1]

        # BUG FIX: the original left next_urls/img_urls unbound when
        # json.loads failed (NameError below), and logged with
        # `"json异常：" + e`, which itself raises TypeError.
        next_urls, img_urls = [], []
        try:
            articles = json.loads(payload, strict=False)[0]
            next_urls = [article["url"] for article in articles]
            img_urls = [article["picname"] for article in articles]
        except Exception as e:
            print("json异常：" + str(e))

        for next_url, img_url in zip(next_urls, img_urls):
            yield Request(url=next_url,
                          meta={"front_image_url": img_url},
                          callback=self.parse_detail)

    def parse_detail(self, response):
        """Load one article page into a ``WuYiCtoItem`` via ItemLoader.

        Missing fields receive explicit placeholder values so downstream
        pipelines always get a fully populated item.
        """
        title = response.css(".wznr h2::text").extract_first("")
        author = response.css(".wznr dl dt span::text").extract_first()
        origin = response.css(".wznr dl dt span a::text").extract_first()
        origin_url = response.css(".wznr dl dt span a::attr(href)").extract_first()
        content = response.xpath("//div[@class='zwnr']/p/text()").extract()

        item_loader = ArticalItemLoader(item=WuYiCtoItem(), response=response)
        item_loader.add_value("url", response.url)
        item_loader.add_value("front_image_url", response.meta.get("front_image_url", ""))
        item_loader.add_value("url_object_id", get_md5(response.url))

        # BUG FIX: extract_first("") returns "" (never None) for a missing
        # title, so the original `title is None` placeholder branch was dead.
        if not title:
            item_loader.add_value("title", "这是一篇有点问题的文章！！！")
        else:
            item_loader.add_css("title", ".wznr h2::text")

        item_loader.add_css("create_time", ".wznr dl dt em::text")

        if author is None:
            item_loader.add_value("author", "作者：佚名作者")
        else:
            item_loader.add_css("author", ".wznr dl dt span::text")

        if origin is None:
            item_loader.add_value("origin", "51在线新闻")
        else:
            item_loader.add_css("origin", ".wznr dl dt span a::text")

        if origin_url is None:
            item_loader.add_value("origin_url", "url_is_null")
        else:
            item_loader.add_css("origin_url", ".wznr dl dt span a::attr(href)")

        item_loader.add_css("praise_num", ".zhan span::text")

        # BUG FIX: .extract() returns a list (possibly empty), never None,
        # so the original `content is None` placeholder branch was dead.
        if not content:
            item_loader.add_value("content", ["暂时无法显示内容...", "请稍后再试！！！"])
        else:
            item_loader.add_xpath("content", "//div[@class='zwnr']/p/text()")

        item_loader.add_css("tags", ".main .main_left .share5 .underline::text")

        yield item_loader.load_item()
