from scrapy import Request, Spider
import json
import time
from tools.filterTags import filter_tags


class ZhiHuSpider(Spider):
    """Crawl all answers of a set of Zhihu questions via the v4 answers API.

    For every question URL in ``self.url_list`` the spider requests the JSON
    answer listing, yields one flat dict per answer, and follows
    ``paging.next`` links until ``paging.is_end`` is true.
    """

    name = 'zhihu_answer'

    # Answer-listing endpoint; ``{}`` is filled with the numeric question id.
    # The ``include`` query string selects which answer/author fields the API
    # returns (voteup_count, follower_count, content, timestamps, ...).
    ANSWER_API = 'https://www.zhihu.com/api/v4/questions/{}/answers?include=data%5B%2A%5D.is_normal%2Cadmin_closed_comment%2Creward_info%2Cis_collapsed%2Cannotation_action%2Cannotation_detail%2Ccollapse_reason%2Cis_sticky%2Ccollapsed_by%2Csuggest_edit%2Ccomment_count%2Ccan_comment%2Ccontent%2Ceditable_content%2Cvoteup_count%2Creshipment_settings%2Ccomment_permission%2Ccreated_time%2Cupdated_time%2Creview_info%2Crelevant_info%2Cquestion%2Cexcerpt%2Crelationship.is_authorized%2Cis_author%2Cvoting%2Cis_thanked%2Cis_nothelp%2Cis_labeled%2Cis_recognized%2Cpaid_info%3Bdata%5B%2A%5D.mark_infos%5B%2A%5D.url%3Bdata%5B%2A%5D.author.follower_count%2Cbadge%5B%2A%5D.topics&limit=5&offset=10&platform=desktop&sort_by=default'

    # Session cookies required for authenticated API access.  Loop-invariant,
    # so defined once at class level instead of being rebuilt per request.
    # NOTE(review): these credentials expire; they should be refreshed or
    # injected from configuration rather than hard-coded.
    COOKIES = {
        '_zap': '063b7d4c-f5ff-4aa4-848f-d4634834411c',
        'd_c0': '"APDkNoJzHQ-PTopoTMx7JcmcAxEXdLx2jsA=|1552454474"',
        '_xsrf': 'LZyJmxMyhBgh74PSw2MyNCut0n6s7YxS',
        'ISSW': '1',
        'l_cap_id': '"MmY4OGRmMWFhZWYzNDA0ZTkzMjQ3NWZjMDBmODQ1MTQ=|1555048872|3a04bf171c39a8d9aceb66f9c0e089deef8648a9"',
        'r_cap_id': '"MWMyN2Q4MmI3MWJmNDg3NThlMTBhZjEwNDU4Zjk0YzU=|1555048872|bba04320e118fc61e2133c782090b52bb4e60073"',
        'cap_id': '"OWVmMDUzZGEwMTU3NDBkYTk4NjM0Nzk3MGNiN2FjYjQ=|1555048872|880c2465d3845b34b8893dcc31374bc83bf524b8"',
        'tgw_l7_route': '4860b599c6644634a0abcd4d10d37251',
        'capsion_ticket': '"2|1:0|10:1555662892|14:capsion_ticket|44:YmNlNjNlMzUzNjkyNDhkZTg3NTQ3YTBkZGZkZDcxNDA=|3d848059f860c18c17a65d02cb54cc4a7d556ee5f2ae55401c7a7e41293f339d"',
        'z_c0': '"2|1:0|10:1555662893|4:z_c0|92:Mi4xdkpOZEF3QUFBQUFBOE9RMmduTWREeVlBQUFCZ0FsVk5MZGFtWFFEbFFPdWNPMUQtNEMtU1dQRHRreGIxeE9BbXF3|2644e1820c28db71602d083fca8c371828ac6375111b5dcf7db6ac141c28c0ff"',
        'q_c1': 'b46a5fc5367d43668194818c94689d65|1555662895000|1555662895000'
    }

    def __init__(self, *args, **kwargs):
        # Forward Scrapy's constructor arguments so Spider.__init__ can do
        # its name/start_urls bookkeeping (the original skipped this call).
        super().__init__(*args, **kwargs)
        # NOTE: question URLs can alternatively be loaded from url.txt:
        # with open('url.txt', encoding='utf-8') as f:
        #     self.url_list = f.read().split('\n')
        self.url_list = ['https://www.zhihu.com/question/36083244']

    @staticmethod
    def _to_date(timestamp):
        """Format a Unix ``timestamp`` as a local ``YYYY-MM-DD`` string."""
        return time.strftime("%Y-%m-%d", time.localtime(timestamp))

    @staticmethod
    def _clean(text):
        """Strip newlines/spaces and swap ASCII commas for fullwidth ones.

        Keeps each value single-line and single-field when later exported
        to comma-separated output.
        """
        return text.replace('\n', '').replace(',', '，').replace(' ', '')

    def start_requests(self):
        """Issue one answers-API request per configured question URL."""
        for question_url in self.url_list:
            # rstrip guards against a trailing slash producing an empty id.
            question_id = question_url.rstrip('/').split('/')[-1]
            yield Request(
                url=self.ANSWER_API.format(question_id),
                callback=self.parse,
                cookies=self.COOKIES,
                # Carry the original question URL through the crawl for logging.
                meta={'url': question_url},
            )

    def parse(self, response):
        """Yield one flat dict per answer, then follow pagination.

        :param response: JSON response from the v4 answers API.
        """
        self.logger.info('parse: %s', response.meta['url'])
        # response.text replaces the deprecated body_as_unicode(), which was
        # removed in modern Scrapy releases.
        json_data = json.loads(response.text)
        answer_count = json_data['paging']['totals']

        for answer_data in json_data['data']:
            question = answer_data['question']
            author = answer_data['author']
            yield {
                'question_title': question['title'],
                'question_id': question['id'],
                'question_created_time': self._to_date(question['created']),
                'question_updated_time': self._to_date(question['updated_time']),
                'author_name': author['name'],
                'author_url_token': author['url_token'],
                'author_follower_count': author['follower_count'],
                'author_headline': self._clean(author['headline']),
                'answer_id': answer_data['id'],
                'answer_count': answer_count,
                'answer_voteup_count': answer_data['voteup_count'],
                'answer_comment_count': answer_data['comment_count'],
                'answer_created_time': self._to_date(answer_data['created_time']),
                'answer_updated_time': self._to_date(answer_data['updated_time']),
                # filter_tags strips HTML from the answer body before cleanup.
                'answer_content': self._clean(filter_tags(answer_data['content'])),
            }

        # Follow the API's own pagination cursor until it reports the end.
        if not json_data['paging']['is_end']:
            yield Request(
                url=json_data['paging']['next'],
                callback=self.parse,
                meta={'url': response.meta['url']},
            )
