# -*- coding: utf-8 -*-
import scrapy
import json
import re
from Snowball.items import SnowballItem

class SnowballSpider(scrapy.Spider):
    """Crawl the Xueqiu public timeline (category 6).

    For each timeline entry, follow the post page to grab its body text,
    then fetch the post's comments and yield one SnowballItem containing
    the text, the page URL, and the top comment (or a fallback string
    when there are no comments).
    """

    name = 'snowball'
    allowed_domains = ['xueqiu.com']
    start_urls = ['https://xueqiu.com/v4/statuses/public_timeline_by_category.json?sin_id=-1&category=6']

    # Compiled once instead of per timeline entry. The dot before "com" is
    # escaped so it matches a literal '.'; group(1) captures the numeric
    # status id that follows the user-id path segment.
    _PAGE_ID_RE = re.compile(r'xueqiu\.com/\d.*?/(\d.*)')

    def parse(self, response):
        """Parse the timeline JSON.

        Yields one Request per post (handled by parse_page) and a Request
        for the next timeline page (handled by parse again).
        """
        result = json.loads(response.body)
        for entry in result["list"]:
            # Each entry's "data" field is itself a JSON-encoded string.
            data = json.loads(entry["data"])
            page_url = data["target"]
            match = self._PAGE_ID_RE.search(page_url)
            if match is None:
                # Skip malformed targets instead of crashing with an
                # IndexError on an empty findall() result.
                self.logger.warning("could not extract page id from %s", page_url)
                continue
            page_id = match.group(1)
            yield scrapy.Request(
                url=page_url,
                callback=self.parse_page,
                meta={'page_id': page_id, "page_url": page_url},
            )

        # Paginate: the API hands back the max_id to request the next page.
        max_id = result["next_max_id"]
        next_url = 'https://xueqiu.com/v4/statuses/public_timeline_by_category.json?sin_id=-1&category=6&max_id={}&count=15'.format(max_id)
        yield scrapy.Request(url=next_url, callback=self.parse)

    def parse_page(self, response):
        """Extract the post's body text, then request its comments.

        The text and original page URL travel in meta to parse_comment.
        """
        text = response.css("div.article__bd__detail::text").extract_first()
        page_id = response.meta["page_id"]
        page_url = response.meta["page_url"]
        comments_url = 'https://xueqiu.com/statuses/comments.json?id={}'.format(page_id)
        yield scrapy.Request(
            url=comments_url,
            callback=self.parse_comment,
            meta={"text": text, "page_url": page_url},
        )

    def parse_comment(self, response):
        """Parse the comments JSON and yield the finished item.

        When the post has no comments, a fixed fallback string is stored
        in the "comment" field; otherwise the first comment's description
        (presumably the top-voted one — confirm against the API) is used.
        """
        snow = SnowballItem()
        comments = json.loads(response.body)
        snow["text"] = response.meta["text"]
        snow["url"] = response.meta["page_url"]
        if comments['count'] == 0:
            snow["comment"] = '还没有神评论'
        else:
            snow["comment"] = comments['comments'][0]["description"]

        yield snow

