#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: xaoyaoyao
@contact: xaoyaoyao@aliyun.com
@file: zhihu_spider.py
@time: 2018/08/31
"""
import re
import json
from scrapy.http import Request
from scrapy.selector import Selector
from scrapy.linkextractors import LinkExtractor
from article_spider.spiders.dynamic.js_spider import JSSpider
from article_spider.items import CommentItem
from article_spider.utils.tools import Tools


class ZhihuSpider(JSSpider):
    """Crawl Zhihu's "explore/recommendations" page.

    Follows column-post links found on the recommendation page, builds a
    :class:`CommentItem` per post (title, author, body), then fetches the
    post's comments from Zhihu's JSON comment API, following the API's
    ``paging.next`` URL until exhausted.
    """

    name = 'zhihu'
    allowed_domains = ['zhihu.com']
    start_urls = [
        'https://www.zhihu.com/explore/recommendations',
    ]

    # Zhihu's login wall; responses redirected here carry no content.
    SIGNIN_URL = 'https://www.zhihu.com/signin'

    def parse(self, response):
        """Extract column-post links from the recommendation page and
        schedule a ``parse_post_detail`` request for each.

        :param response: the rendered recommendation-list page.
        """
        self.logger.info('[ZhihuSpider] This is an item page! %s', response.url)
        extractor = LinkExtractor(
            restrict_xpaths='//div[@id="zh-recommend-list-full"]'
                            '//div[@class="zm-item"]//a[@class="post-link"]')
        links = extractor.extract_links(response)
        if links:
            # BUG FIX: the original log call had no %s placeholder for the
            # extra argument, which triggers a logging formatting error.
            self.logger.info('[ZhihuSpider] The links >> %s ', links)
            for detail_link in links:
                if detail_link:
                    yield Request(url=detail_link.url,
                                  callback=self.parse_post_detail)

    def parse_question_detail(self, response):
        """Inspection parser for question pages: logs the question title,
        detail text, tags and each answer body. Produces no items.

        :param response: a rendered question page.
        """
        current_url = response.url
        self.logger.info(
            '[ZhihuSpider] - parse_question_detail :: This is an item page! %s',
            current_url)
        # BUG FIX: the original used ``pass`` here (a no-op); a login-wall
        # redirect has no question content, so bail out instead.
        if self.SIGNIN_URL in current_url:
            return
        rt_title = response.css(
            'div.QuestionHeader-main h1.QuestionHeader-title::text').extract()
        self.logger.debug('question >> %s %s', current_url, rt_title)
        rt_richtext = response.css(
            'div.QuestionHeader-detail span.RichText.ztext::text').extract()
        self.logger.debug('question >> %s', rt_richtext)
        rt_tag = response.css('span.Tag-content div.Popover div::text').extract()
        self.logger.debug('question >> %s', rt_tag)
        response_selector = Selector(response=response)
        rt_content = response_selector.xpath(
            '//div[@class="List-item"]//div[@class="RichContent-inner"]')
        for rt_c in rt_content:
            r_content = rt_c.xpath(
                './span[contains(@class,"RichText")]/p/text()').extract()
            # Join paragraphs with newlines, stripping ideographic
            # (full-width) spaces used as paragraph indentation.
            content = ''.join(
                str(c_text) + '\n' for c_text in (r_content or [])
            ).replace('\u3000', '')
            self.logger.debug('r_content >> %s', content)

    def parse_post_detail(self, response):
        """Build a ``CommentItem`` from a column-post page.

        When the post id can be recovered from the URL (``/p/<digits>``), a
        follow-up request to the comment API is scheduled and the enriched
        item is emitted by :meth:`parse_comment`; otherwise the item is
        emitted here.

        :param response: a rendered column-post page.
        """
        current_url = response.url
        self.logger.info(
            '[ZhihuSpider] - parse_post_detail :: This is an item page! %s',
            current_url)
        # BUG FIX: ``pass`` was a no-op; skip login-wall redirects outright.
        if self.SIGNIN_URL in current_url:
            return

        comment_item = CommentItem()
        comment_item['name'] = '知乎'
        comment_item['type'] = '推荐'
        comment_item['url'] = current_url
        comment_item['domain'] = Tools.domain(current_url)

        rt_title = response.css(
            'header.Post-Header h1.Post-Title::text').extract()
        comment_item['title'] = rt_title[0] if rt_title else ''
        rt_author = response.css(
            'div.AuthorInfo-head div.Popover a.UserLink-link::text').extract()
        comment_item['author'] = rt_author[0] if rt_author else ''

        rt_content = response.css(
            'main.App-main div.RichText.ztext.Post-RichText p::text').extract()
        comment_item['content'] = ''.join(
            str(c_text) + '\n' for c_text in rt_content
        ).replace('\u3000', '')

        # Comments rendered inline in the HTML (usually empty for pages
        # that load comments via the JSON API below).
        response_selector = Selector(response=response)
        comments = []
        for rt_comment in response_selector.xpath(
                '//div[@class="CommentList"]/div[@class="CommentItem"]'):
            r_comment = rt_comment.xpath(
                './/div[contains(@class,"CommentItem-content")]//text()').extract()
            comments.append(''.join(
                str(c_text) + '\n' for c_text in (r_comment or [])
            ).replace('\u3000', ''))
        comment_item['comments'] = comments
        self.logger.info('comment_item >> %s', comment_item)

        # The column-post id ("/p/<digits>") keys the comment API:
        # https://www.zhihu.com/api/v4/articles/<id>/comments?...
        match = re.search(r'/p/(\d+)', current_url)
        if match:
            url = ('https://www.zhihu.com/api/v4/articles/{}/comments'
                   '?order=normal&limit=20&offset=0&status=open').format(
                       match.group(1))
            yield Request(url=url, callback=self.parse_comment,
                          meta={'comment': comment_item})
            # BUG FIX: the original unconditionally yielded the item here as
            # well, emitting a duplicate comment-less copy alongside the
            # enriched item produced by parse_comment.
        else:
            yield comment_item

    def parse_comment(self, response):
        """Parse one page of the comment API.

        Attaches the page's comments to the in-flight ``CommentItem``
        (carried in ``response.meta['comment']``), yields the item, and
        follows the API's next-page link when present.

        :param response: a JSON response from the article-comments API.
        """
        comment = response.meta['comment']
        result = response.body.decode()
        self.logger.info('[ZhihuSpider] - parse_comment results >> %s ', result)
        try:
            if result:
                result = Tools.clean_html(html_text=result)
            json_result = json.loads(result)
            datas = json_result.get('data')
            if datas:
                # 'score' has no source in this API; kept for item-schema
                # consistency with other spiders — TODO confirm.
                comment['comments'] = [
                    {'content': Tools.clean_html(html_text=data['content']),
                     'score': None}
                    for data in datas if data.get('content')
                ]
                yield comment
                paging = json_result.get('paging')
                next_url = paging.get('next') if paging else None
                if next_url:
                    yield Request(url=next_url, callback=self.parse_comment,
                                  meta={'comment': comment})
        except Exception as e:
            self.logger.error(
                "[ZhihuSpider] parse_comment error. The msg %s", str(e))
