from scrapy import Spider, Request
import json
from lxml import etree


def optimize_str(text):
    """Normalize scraped review text for CSV-friendly output.

    Strips surrounding whitespace, removes newlines, carriage returns,
    literal ``</br>`` fragments and spaces, and converts ASCII commas to
    full-width commas (``，``) so the text cannot break a comma-separated
    export.

    Note: the replace order matters — ``\n``/``\r`` are removed before
    ``</br>`` so a tag broken across lines is still stripped.
    """
    # Parameter renamed from `str` (which shadowed the builtin).
    return text.strip().replace('\n', '').replace('\r', '').replace('</br>', '').replace(',', '，').replace(' ', '')


class AHComparebbsSpider(Spider):
    """Scrape hot-sorted game reviews (and, optionally, their reply
    threads) from taptap.com.

    ``parse`` yields one dict per review and follows pagination;
    ``parse_comment`` parses the AJAX reply-list endpoint and yields one
    dict per reply (only reachable via the currently commented-out
    requests inside ``parse``).
    """
    name = 'taptap_comment'
    start_urls = [
        # "https://www.taptap.com/app/2301/review?order=hot&page=1#review-list"
    ]

    def start_requests(self):
        """Seed the crawl with page 1 of each game's hot-review list."""
        # Alternative: load game URLs from a file instead of hard-coding.
        # with open('urls.txt', encoding='utf-8') as f:
        #     urls = f.read().split('\n')
        urls = [
            "https://www.taptap.com/app/135293"
        ]
        for url in urls:
            yield Request(
                url=url + "/review?order=hot&page=1#review-list",
                callback=self.parse
            )

    def parse(self, response):
        """Extract every review on a review-list page, then follow the
        "next page" link if one exists."""
        self.logger.info('parse: %s', response.url)
        game_name = response.xpath('//h1/text()').extract()[0].strip()
        data_list = response.xpath('//*[@id="reviewsList"]/li[contains(@class,"taptap-review-item collapse")]')
        for item in data_list:
            # The <li> id looks like "review-12345"; keep the numeric part.
            review_id = item.xpath('./@id').extract()[0].split('-')[-1]
            user_name = item.xpath('.//div[@class="item-text-header"]/span[@class="taptap-user"]/a/text()').extract()[0]
            user_id = item.xpath('.//div[@class="item-text-header"]//span[@class="taptap-user"]/@data-user-id').extract()[0]
            # FIX: the predicate was the bare string literal
            # div["item-text-score"], which is always true and matched
            # every div; match on @class as the sibling selectors do.
            score = item.xpath('.//div[@class="item-text-score"]/i[@class="colored"]/@style').extract()[0]
            # Star rating is rendered as a pixel width, 14px per star.
            score = score.replace('width: ', '').replace('px', '')
            score_time = ''.join(item.xpath('.//span[@class="text-score-time"]/text()').extract())
            postdate = ''.join(item.xpath('.//a[@class="text-header-time"]//span[@data-placement]/@title').extract()).replace('Posted at ', '')
            updatetime = ''.join(item.xpath('.//a[@class="text-header-time"]//span[@data-dynamic-time]/text()').extract())
            contents = ''.join(item.xpath('./div/div[@class="item-text-body"]//text()').extract())
            device = ''.join(item.xpath('.//span[@class="text-footer-device"]/text()').extract())
            vote_funny = ''.join(item.xpath('.//button[@data-value="funny" and @data-obj="review"]/span[last()]/text()').extract())
            vote_up = ''.join(item.xpath('.//button[@data-value="up" and @data-obj="review"]/span[last()]/text()').extract())
            vote_down = ''.join(item.xpath('.//button[@data-value="down" and @data-obj="review"]/span[last()]/text()').extract())
            data = {
                'game_name': game_name,
                'comment_type': '原评论',
                'review_id': review_id,
                'comment_id': review_id,
                'user_name': user_name,
                'user_id': user_id,
                'score': int(int(score) / 14),  # px width -> 0..5 stars
                'score_time': score_time.replace('Played game for ', ''),
                'postdate': postdate,
                'updatetime': updatetime,
                'contents': optimize_str(contents),
                'device': device,
                # Missing vote counters mean zero votes.
                'vote_funny': vote_funny or '0',
                'vote_up': vote_up or '0',
                'vote_down': vote_down or '0',
            }
            yield data
            # NOTE(review): reply-thread crawling is intentionally disabled;
            # re-enable to also fetch each review's comment pages via
            # parse_comment.
            # comment_max_pages = ''.join(item.xpath('.//ul[@class="pagination"]/li[last()-1]/a/text()').extract())
            # if comment_max_pages == '':
            #     yield Request(
            #         url='https://www.taptap.com/ajax/review/comments/{}?id={}&page=1'.format(review_id, review_id),
            #         callback=self.parse_comment,
            #         meta={'review_id': review_id, 'game_name': game_name}
            #     )
            # else:
            #     for page in range(1, int(comment_max_pages) + 1):
            #         yield Request(
            #             url='https://www.taptap.com/ajax/review/comments/{}?id={}&page={}'.format(review_id, review_id, page),
            #             callback=self.parse_comment,
            #             meta={'review_id': review_id, 'game_name': game_name}
            #         )
        # Pagination: the last <li> holds the "next page" link, absent on
        # the final page.
        next_url = ''.join(response.xpath('//div[@class="main-body-footer"]//ul/li[last()]/a/@href').extract())
        if next_url:
            yield Request(
                url=next_url,
                callback=self.parse
            )

    def parse_comment(self, response):
        """Parse the AJAX reply-list endpoint: the JSON payload carries an
        HTML fragment that is re-parsed with lxml."""
        self.logger.info('parse_comment: %s', response.url)
        json_data = json.loads(response.text)
        html = etree.HTML(json_data['data']['html'])
        data_list = html.xpath('//ul[@class="list-unstyled taptap-comments-list"]/li')
        for item in data_list:
            comment_id = item.xpath('./@id')[0]
            user_name = item.xpath('./@data-user')[0]
            user_id = item.xpath('./a/@href')[0]
            postdate = item.xpath('.//span[@class="text-footer-time"]/text()')[0]
            contents = ''.join(item.xpath('.//div[@class="item-text-body"]//text()'))
            vote_up = ''.join(item.xpath('.//button[@data-value="up" and @data-obj="reviewComment"]/span[last()]/text()'))
            vote_down = ''.join(item.xpath('.//button[@data-value="down" and @data-obj="reviewComment"]/span[last()]/text()'))
            data = {
                'game_name': response.meta['game_name'],
                'comment_type': '评论回复',
                'review_id': response.meta['review_id'],
                'comment_id': comment_id.split('-')[-1],  # "comment-123" -> "123"
                'user_name': user_name,
                'user_id': user_id.split('/')[-1],  # profile URL -> numeric id
                'score': '',        # replies carry no score
                'score_time': '',
                'postdate': postdate[:10].replace('-', ''),  # "YYYY-MM-DD..." -> "YYYYMMDD"
                'updatetime': '',
                'contents': optimize_str(contents),
                'device': '',
                'vote_funny': '',   # replies have no "funny" vote
                'vote_up': vote_up or '0',
                'vote_down': vote_down or '0',
            }
            yield data
