import json

import scrapy
from scrapy.selector import Selector
from ZhiHuCrawler.items import *


class ZhihuSpider(scrapy.Spider):
    """Crawl Zhihu topic plaza -> sub-topics -> essence feeds -> answers.

    Pipeline of callbacks:
        parse             -> one POST per top-level topic category
        small_topic_parse -> one SmallTopicItem per sub-topic, then its essence feed
        essence_parse     -> one EssenceItem per feed entry, then the question's answers
        answer_parse      -> one AnswerItem per answer, paginating through pages
    """

    name = 'zhihu'
    allowed_domains = ['zhihu.com']
    start_urls = ['https://www.zhihu.com/topics']

    def _is_dev(self):
        """Return True when running in development mode (ENV setting).

        In dev mode every stage stops after its first request / page so a
        quick smoke-test run does not trigger a full crawl.
        """
        return self.settings.get('ENV', 'dev') == 'dev'

    def parse(self, response):
        """Parse the topic plaza page: request the sub-topic list of each category."""

        big_topic_data_ids = response.css(
            'body > div.zg-wrap.zu-main.clearfix > div.zu-main-content > div > div > ul > li::attr(data-id)').extract()
        small_topic_url = 'https://www.zhihu.com/node/TopicsPlazzaListV2'
        for data_id in big_topic_data_ids:
            # POST form data; the endpoint expects `params` to be a
            # JSON-encoded string, so build it with json.dumps rather than
            # hand-assembling the JSON in an f-string.
            data = {
                'method': 'next',
                'params': json.dumps(
                    {'topic_id': int(data_id), 'offset': 20, 'hash_id': ''}),
            }
            yield scrapy.FormRequest(
                url=small_topic_url,
                formdata=data,
                callback=self.small_topic_parse,
            )
            if self._is_dev():
                # Dev mode: only the first category.
                break

    def small_topic_parse(self, response):
        """Parse one page of sub-topics; emit items and follow each essence feed."""

        result = json.loads(response.text)
        msg = result.get('msg', [])
        for div_html in msg:
            # Each entry is a bare HTML fragment; Selector wraps it in a full
            # <html><body> document, hence the body-rooted CSS paths below.
            s = Selector(text=div_html)
            item = SmallTopicItem()
            item['logo_url'] = s.css('body > div > div > a:nth-child(1) > img::attr(src)').extract_first()
            item['name'] = s.css('body > div > div > a:nth-child(1) > strong::text').extract_first()
            item['desc'] = s.css('body > div > div > p::text').extract_first()
            yield item

            # The topic link's last path segment is the numeric topic id used
            # by the v4 API. Use a dedicated name (the original shadowed the
            # parsed-JSON `result` here) and guard against a missing link so
            # a malformed fragment is skipped instead of raising on None.
            href = s.css('body > div > div > a:nth-child(1)::attr(href)').extract_first()
            if not href:
                continue
            essence_id = href.split('/')[-1]
            params = {
                'include':
                    'data[?(target.type=topic_sticky_module)].target.data[?(target.type=answer)].target.content,relationship.is_authorized,is_author,voting,is_thanked,is_nothelp;data[?(target.type=topic_sticky_module)].target.data[?(target.type=answer)].target.is_normal,comment_count,voteup_count,content,relevant_info,excerpt.author.badge[?(type=best_answerer)].topics;data[?(target.type=topic_sticky_module)].target.data[?(target.type=article)].target.content,voteup_count,comment_count,voting,author.badge[?(type=best_answerer)].topics;data[?(target.type=topic_sticky_module)].target.data[?(target.type=people)].target.answer_count,articles_count,gender,follower_count,is_followed,is_following,badge[?(type=best_answerer)].topics;data[?(target.type=answer)].target.annotation_detail,content,hermes_label,is_labeled,relationship.is_authorized,is_author,voting,is_thanked,is_nothelp,answer_type;data[?(target.type=answer)].target.author.badge[?(type=best_answerer)].topics;data[?(target.type=answer)].target.paid_info;data[?(target.type=article)].target.annotation_detail,content,hermes_label,is_labeled,author.badge[?(type=best_answerer)].topics;data[?(target.type=question)].target.annotation_detail,comment_count;',
                'limit': '10'
            }
            essence_url = f'https://www.zhihu.com/api/v4/topics/{essence_id}/feeds/essence'
            # method="GET" makes FormRequest encode `params` into the query string.
            yield scrapy.FormRequest(
                url=essence_url,
                method="GET",
                formdata=params,
                callback=self.essence_parse
            )
            if self._is_dev():
                # Dev mode: only the first sub-topic.
                break

    def essence_parse(self, response):
        """Parse one page of a topic's essence feed; follow each question's answers."""

        result = json.loads(response.text)

        datas = result.get('data', [])
        for data in datas:

            item = EssenceItem()
            item['content'] = data
            yield item

            question_id = data.get('target', {}).get('question', {}).get('id', '')
            if question_id:
                answer_url = f'https://www.zhihu.com/api/v4/questions/{question_id}/answers'
                params = {
                    'include':
                        'data[*].is_normal,admin_closed_comment,reward_info,is_collapsed,annotation_action,annotation_detail,collapse_reason,is_sticky,collapsed_by,suggest_edit,comment_count,can_comment,content,editable_content,attachment,voteup_count,reshipment_settings,comment_permission,created_time,updated_time,review_info,relevant_info,question,excerpt,is_labeled,paid_info,paid_info_content,relationship.is_authorized,is_author,voting,is_thanked,is_nothelp,is_recognized;data[*].mark_infos[*].url;data[*].author.follower_count,vip_info,badge[*].topics;data[*].settings.table_of_content.enabled',
                    'offset': '',
                    'limit': '3',
                    'sort_by': 'default',
                    'platform': 'desktop',
                }
                yield scrapy.FormRequest(
                    url=answer_url,
                    method="GET",
                    formdata=params,
                    callback=self.answer_parse
                )
                if self._is_dev():
                    # Dev mode: only the first question.
                    break

        if self._is_dev():
            return

        paging = result.get('paging', {})
        # Not the last page yet: keep walking the essence feed.
        if not paging.get('is_end', True):
            next_url = paging.get('next', '')
            # Next page of the essence feed.
            if next_url:
                yield scrapy.Request(
                    url=next_url,
                    callback=self.essence_parse
                )

    def answer_parse(self, response):
        """Parse one page of answers; emit items and paginate.

        `num` (carried in request meta) counts answer *pages* fetched for a
        question; with limit=3 per page the ANSWER_NUM setting (default 500)
        therefore bounds pages, not individual answers.
        """
        num = int(response.meta.get('num', 0))
        num += 1

        result = json.loads(response.text)

        datas = result.get('data', [])
        for data in datas:
            item = AnswerItem()
            item['content'] = data
            yield item

        # Stop after ANSWER_NUM pages per question (default 500).
        if num > self.settings.get('ANSWER_NUM', 500):
            return

        if self._is_dev():
            return

        paging = result.get('paging', {})
        if not paging.get('is_end', True):
            next_url = paging.get('next', '')
            if next_url:
                yield scrapy.Request(
                    url=next_url,
                    meta={'num': num},
                    callback=self.answer_parse
                )
