# -*- coding: utf-8 -*-

from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import  Rule
from scrapy_redis.spiders import RedisCrawlSpider
from .. import utils, items, LinkExtractorRule
import re

# Run Redis in the background via Docker:
#   docker run --name redis-test -i -t -p 0.0.0.0:9001:6379 -d redis /bin/bash

class SpiderSpider(RedisCrawlSpider):
    """Distributed Douban group crawler driven by start URLs pushed to Redis.

    Follows listing pagination, topic pages, group pages and comment
    pagination, yielding ``TopicItem``, ``GroupItem`` and ``CommentItem``
    instances for the item pipeline to persist.
    """
    name = 'spider'
    allowed_domains = ['www.douban.com']
    # Example seed URLs (push these to the Redis key instead of hard-coding):
    #   https://www.douban.com/group/topic/124785371/
    #   https://www.douban.com/group/explore
    redis_key = "douban_spider:start_urls"

    # Numeric entity id embedded in topic/group URLs, e.g. ".../topic/124785371/".
    _ID_RE = re.compile(r"/(\d+)/")
    # Creation date inside the group-board text, e.g. "2019-01-02".
    # Raw string fixes the invalid "\-" escapes of the original pattern.
    _DATE_RE = re.compile(r"\d+-\d+-\d+")
    # Group id taken from the sidebar link, e.g. "/group/<id>/?ref=sidebar".
    _GROUP_ID_RE = re.compile(r"/group/(\S+)/\?ref=sidebar")

    # Listing pagination links.
    page_link = LinkExtractor(process_value=LinkExtractorRule.page_process_value)
    # Topic links found on each listing page.
    topic_link = LinkExtractor(process_value=LinkExtractorRule.topic_process_value)
    # Group front-page links.
    group_link = LinkExtractor(process_value=LinkExtractorRule.group_process_value)
    # Comment pagination links.
    comment_link = LinkExtractor(process_value=LinkExtractorRule.comment_process_value)

    # CrawlSpider matching rules; callbacks must not be named parse().
    rules = (
        Rule(page_link, follow=True),
        Rule(topic_link, callback="parse_topic", follow=True),
        Rule(group_link, callback="parse_group", follow=False),
        Rule(comment_link, callback="parse_comment", follow=True),
    )

    def parse_topic(self, response):
        """Yield a ``TopicItem`` for a topic page, then every comment on it."""
        topic_id = self._ID_RE.search(response.url).group(1)
        topic_item = items.TopicItem()
        topic_item["id"] = topic_id
        topic_item["title"] = utils.is_None(response.xpath('//*[@class="tablecc"]/text()'))[0]
        topic_item["person_name"] = utils.is_None(response.xpath('//*[@class="from"]/a/text()'))[0]
        topic_item["content"] = utils.is_None(response.xpath('//*[@class="topic-content"]'))[0]
        group_href = utils.is_None(response.xpath(
            '//*[@class="group-item"]/div[@class="info"]/div[@class="title"]/a/@href'))[0]
        topic_item["group_id"] = self._GROUP_ID_RE.search(group_href).group(1)
        yield topic_item
        for comment_item in self.get_comments(response, topic_id):
            yield comment_item

    def parse_group(self, response):
        """Yield a ``GroupItem`` scraped from a group's front page."""
        # Common XPath prefix of the group-board section; concatenation keeps
        # the resulting runtime queries identical to the original literals.
        board = ('//*[@id="content"]/div[@class="grid-16-8 clearfix"]'
                 '/div[@class="article"]/div[@class="group-board"]')
        group_item = items.GroupItem()
        group_item["id"] = self._ID_RE.search(response.url).group(1)
        group_item["name"] = utils.is_None(response.xpath('//*[@id="group-info"]/div/h1/text()'))[0]
        group_item["leader"] = utils.is_None(response.xpath(board + '/p/a/text()'))[0]
        board_text = utils.is_None(response.xpath(board + '/p/text()'))[0]
        # Fall back to a placeholder when the board text carries no YYYY-MM-DD
        # (the original raised AttributeError on non-empty text without a date).
        date_match = self._DATE_RE.search(board_text)
        group_item["time"] = date_match.group() if date_match else "0000-00-00"
        group_item["content"] = utils.is_None(response.xpath(board + '/div[@class="group-intro"]'))[0]
        yield group_item

    def parse_comment(self, response):
        """Yield ``CommentItem``s from a paginated comment page of a topic."""
        topic_id = self._ID_RE.search(response.url).group(1)
        for item in self.get_comments(response, topic_id):
            yield item

    def get_comments(self, response, topic_id):
        """Return a list of ``CommentItem`` parsed from the ``#comments`` list.

        :param response: page response containing a ``#comments`` element
        :param topic_id: id of the topic these comments belong to
        """
        item_list = []
        for li in response.xpath('//*[@id="comments"]').xpath("./li"):
            item = items.CommentItem()
            item["id"] = utils.is_None(li.xpath("./@data-cid"))[0]
            item["person_name"] = utils.is_None(
                li.xpath('./div[@class="reply-doc content"]/div[@class="bg-img-green"]/h4/a/text()'))[0]
            item["content"] = utils.is_None(li.xpath('./div[@class="reply-doc content"]/p/text()'))[0]
            item["topic_id"] = topic_id
            item_list.append(item)
        return item_list

