import json
import re

import scrapy


class ChoutiTopSpider(scrapy.Spider):
    """Crawl dig.chouti.com: the HTML front page plus the paginated "hot" JSON feed.

    Yields three kinds of dict items, distinguished by the ``type`` key:

    - ``'info'``    — news metadata (title, source URL, submitter, heat count)
    - ``'comment'`` — raw comment JSON body for one news entry
    - ``'img'``     — raw image bytes for one news entry

    NOTE(review): image URLs may live on a CDN outside ``allowed_domains``;
    offsite requests would be filtered — confirm against real responses.
    """

    name = 'chouti_top'
    allowed_domains = ['chouti.com']
    start_urls = ['https://dig.chouti.com/']
    # Index of the NEXT JSON page to fetch; the HTML front page counts as page 1.
    page_num = 2

    def parse(self, response):
        """Parse the HTML front page, then schedule the first JSON "hot" page.

        Pagination uses the ``data-time-into-pool`` timestamp of the last
        entry on the page as the ``afterTime`` cursor.
        """
        news_list = response.xpath('//div[@class="link-con"]/div')
        if not news_list:
            # Empty page: nothing to emit and no cursor to paginate from.
            return
        last_time = news_list[-1].xpath('./@data-time-into-pool').extract_first()

        for news in news_list:
            data_id = news.xpath('./@data-id').extract_first()
            img_url = news.xpath('./div/a/img/@src').extract_first()
            title = news.xpath('./div/div/div/a/text()').extract_first()
            source_url = news.xpath('./div/div/div/a/@href').extract_first()

            # Relative '/link...' hrefs point back at chouti itself; rebuild
            # an absolute detail-page URL from the entry id instead.
            if source_url and source_url.startswith('/link'):
                source_url = 'https://dig.chouti.com/link/%s' % data_id

            push_man = news.xpath('./div/div/div[2]//span[@class="left author-name"]/text()').extract_first()
            heat = news.xpath('./div/div/div[2]//span[@class="recommend-num left"]/text()').extract_first()

            yield from self._emit_news(data_id, title, source_url, push_man, heat, img_url)

        yield scrapy.Request(
            url='https://dig.chouti.com/link/hot?afterTime=%s' % last_time,
            callback=self.time_json,
        )
        # Lazy %-args: the message is only formatted if the level is enabled.
        self.logger.info('已爬取第%s页数据', self.page_num)

    def time_json(self, response):
        """Parse one page of the JSON "hot" feed; schedule the next page up to page 4.

        The feed nests entries under the ``data`` key; each entry carries
        ``time_into_pool``, which the last entry supplies as the next
        ``afterTime`` cursor.
        """
        news_dict = json.loads(response.body)
        data = news_dict.get('data') or []
        if not data:
            # Missing/empty payload: stop paginating instead of crashing
            # on data[-1].
            return
        last_time = data[-1].get('time_into_pool')

        for info in data:
            data_id = info.get('id')
            title = info.get('title')
            source_url = info.get('originalUrl')

            # Entries whose original URL points back at chouti are rebuilt
            # as https detail-page links. Guarding against None also avoids
            # a TypeError when 'originalUrl' is absent.
            if source_url and source_url.startswith('http://dig.chouti.com'):
                source_url = 'https://dig.chouti.com/link/%s' % data_id

            # 'submitted_user' may be absent; fall back to an empty dict so
            # .get('nick') yields None rather than raising AttributeError.
            push_man = (info.get('submitted_user') or {}).get('nick')
            heat = info.get('ups')
            img_url = info.get('img_url')

            yield from self._emit_news(data_id, title, source_url, push_man, heat, img_url)

        if self.page_num <= 4:
            self.page_num += 1
            yield scrapy.Request(
                url='https://dig.chouti.com/link/hot?afterTime=%s' % last_time,
                callback=self.time_json,
            )
            self.logger.info('已爬取第%s页数据', self.page_num)

    def _emit_news(self, data_id, title, source_url, push_man, heat, img_url):
        """Yield one 'info' item plus its follow-up image/comment requests.

        Shared tail of ``parse`` and ``time_json`` so the two stay consistent.
        """
        yield {
            'type': 'info',
            'title': title,
            'source_url': source_url,
            'push_man': push_man,
            'heat': heat,
        }
        if img_url:
            yield scrapy.Request(
                url=img_url,
                callback=self.parse_img,
                cb_kwargs={'title': title},
            )
        yield scrapy.Request(
            url='https://dig.chouti.com/comments/show?id=%s&sort=1' % data_id,
            callback=self.parse_comment,
            cb_kwargs={'title': title},
        )

    def parse_comment(self, response, title):
        """Yield the raw comment JSON for the entry titled *title*."""
        yield {
            'type': 'comment',
            'title': title,
            'comment_info': response.body,
        }

    def parse_img(self, response, title):
        """Yield the raw image bytes; the title is truncated for use as a filename."""
        yield {
            'type': 'img',
            # First 8 chars only — presumably to keep the saved filename
            # short; TODO confirm against the pipeline that consumes this.
            'title': title[:8],
            'content': response.body,
        }
