# -*- coding: utf-8 -*-
import scrapy
from urllib import parse
import re
import json
from ..items import ZhihuItem


class ZhihuspiderSpider(scrapy.Spider):
    """Crawl Zhihu users: search each configured keyword, pick the first user
    link in the results, then walk that user's followee list via the v4 API,
    recursively searching every discovered url_token."""

    name = 'zhihuspider'
    #allowed_domains = ['www.zhihu.com']
    start_urls = ['http://www.zhihu.com/']
    # Search endpoint; the URL-quoted query string is appended.
    base_url = 'https://www.zhihu.com/search?type=content&q='
    # Followees API for one member; {token} is the member's url_token.
    followees_api = (
        'https://www.zhihu.com/api/v4/members/{token}/followees'
        '?include=data%5B*%5D.answer_count%2Carticles_count%2Cgender%2Cfollower_count'
        '%2Cis_followed%2Cis_following%2Cbadge%5B%3F(type%3Dbest_answerer)%5D.topics'
        '&offset=0&limit=20'
    )
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36',
    }

    def start_requests(self):
        """Seed one search request per keyword from the KEY_WORD setting."""
        for keyword in self.settings.get('KEY_WORD'):
            url = self.base_url + parse.quote(keyword)
            yield scrapy.Request(url=url, headers=self.headers, callback=self.parse_first)

    def parse_first(self, response):
        """Find the first user profile link on a search-result page and
        request that user's followees.

        Guards against a missing link instead of catching the TypeError that
        re.search(None) used to raise; failures go to the spider logger.
        """
        url = response.css('.UserLink-link::attr(href)').extract_first()
        if url is None:
            # No user link on this result page — nothing to follow.
            self.logger.debug('no UserLink-link found on %s', response.url)
            return
        # Dots escaped; url_tokens may contain letters, digits, '_' and '-'.
        rs = re.search(r'//www\.zhihu\.com/people/([\w-]+)', url)
        if rs:
            url_token = rs.group(1)
            yield scrapy.Request(
                url=self.followees_api.format(token=url_token),
                headers=self.headers,
                callback=self.parse_user,
            )

    def parse_user(self, response):
        """Parse one page of the followees API.

        Yields a ZhihuItem per followee, re-seeds a keyword search for each
        followee's url_token (so parse_first walks their followees too), and
        follows paging['next'] until the API reports the last page.
        """
        info = json.loads(response.text)
        total = info['paging']['totals']  # loop-invariant: total followee count
        for user in info['data']:
            item = ZhihuItem()
            item['id'] = user['id']
            item['name'] = user['name']
            item['total'] = total
            item['headline'] = user['headline']
            item['answer_count'] = user['answer_count']
            item['articles_count'] = user['articles_count']
            item['follower_count'] = user['follower_count']
            item['url_token'] = user['url_token']
            yield item
            # Quote the token for consistency with start_requests.
            url = self.base_url + parse.quote(user['url_token'])
            yield scrapy.Request(url=url, headers=self.headers, callback=self.parse_first)
        # Stop paging at the last page; previously 'next' was requested
        # unconditionally, which keeps hitting the API past the end.
        # NOTE(review): 'is_end' is the Zhihu v4 paging flag — confirm against
        # a live response.
        if not info['paging'].get('is_end'):
            yield scrapy.Request(
                url=info['paging']['next'],
                headers=self.headers,
                callback=self.parse_user,
            )
