# -*- coding: utf-8 -*-
import json
from zhihuxjj.items import BookItem
from scrapy import Spider,Request

class ZhihuxjjSpider(Spider):
    """Crawl the Nanjing University library OPAC category-search API and
    yield one BookItem per search hit.

    Despite the module name (zhihuxjj), the live code path only talks to
    opac.nju.edu.cn. ``parse_fo`` and ``user_url`` are leftover Zhihu-crawler
    machinery that is currently unreachable from ``start_requests``.
    """

    name = 'nanda'
    # Must be bare domain names (no scheme/path) and must cover every host we
    # actually request. The original value ["http://lib.nju.edu.cn/"] was a
    # full URL and did not match opac.nju.edu.cn, so Scrapy's offsite
    # middleware would have filtered the search requests.
    allowed_domains = ["nju.edu.cn"]
    start_urls = ["http://opac.nju.edu.cn/browse/cls_browsing.php#/categorySearchList"]
    # Seed user token; restored because parse_fo reads self.start_user
    # (it was commented out, which would raise AttributeError there).
    start_user = "jixin"
    start_item = "jixin"
    # Category-search endpoint (expects a JSON POST body).
    classes_url = 'http://opac.nju.edu.cn/find/unify/search'
    # Personal-profile endpoint (leftover Zhihu API, used only by parse_fo).
    user_url = 'https://www.zhihu.com/api/v4/members/{user}?include={include}'
    # Profile fields to request; the Zhihu API accepts many more
    # (employments, gender, follower_count, ... -- see the member API docs).
    user_include = 'locations'
    # Template search payload. start_requests copies it per request so the
    # shared class attribute is never mutated.
    playload = {
        "docCode": [],
        "searchFieldContent": "A",
        "searchField": "callNo",
        "resourceType": [],
        "subject": [],
        "discode1": [],
        "publisher": [],
        "locationId": [],
        "collectionName": [],
        "author": [],
        "langCode": [],
        "countryCode": [],
        "coreInclude": [],
        "ddType": [],
        "verifyStatus": [],
        "group": [],
        "sortField": "relevance",
        "sortClause": "desc",
        "page": 1,
        "rows": 10,
        # NOTE(review): the *string* 'null' (not None / JSON null) is what the
        # original sent; the server may depend on it -- confirm before changing.
        "onlyOnShelf": 'null',
    }

    def start_requests(self):
        """Yield one POST per result page (pages 1-9) of the category search."""
        headers = {
            'Content-Type': 'application/json',
            "Host": "opac.nju.edu.cn",
            "Origin": "http://opac.nju.edu.cn",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Accept": "application/json, text/plain, */*",
            # NOTE(review): sent as an HTTP header in the original request;
            # presumably an institution code required by the OPAC API -- verify.
            "groupCode": "200027",
        }
        for page in range(1, 10):
            # Copy the template instead of mutating the shared class attribute;
            # the original relied on json.dumps running eagerly per iteration.
            payload = dict(self.playload, page=page)
            yield Request(
                self.classes_url,
                body=json.dumps(payload),
                method='POST',
                headers=headers,
                callback=self.parse_user,
            )

    def parse_user(self, response):
        """Parse one JSON search-result page and yield a BookItem per record."""
        result = json.loads(response.text)
        book_list = result['data']['searchResult']
        for book in book_list:
            item = BookItem()
            item['recordId'] = book['recordId']
            item['title'] = book['title']
            item['author'] = book['author']
            yield item

    def parse_fo(self, response):
        """Walk a Zhihu-style followees list, scheduling a profile request per
        user and recursing into each follower's own followee list.

        NOTE(review): currently unreachable from start_requests, and it reads
        self.followees_url, which is not defined anywhere in this class -- it
        would raise AttributeError if invoked. Kept as-is pending a decision
        on whether to delete or repair it.
        """
        results = json.loads(response.text)
        for result in results['data']:
            yield Request(self.user_url.format(user=result['url_token'], include=self.user_include),
                          callback=self.parse_user)
            if result['url_token'] != self.start_user:
                # Recurse into this follower's followee list (depth += 1).
                yield Request(self.followees_url.format(user=result['url_token'], offset=0),
                              callback=self.parse_fo)
        # Follow pagination until the API reports the last page.
        if results['paging']['is_end'] is False:
            next_url = results['paging']['next'].replace('http', 'https')
            yield Request(next_url, callback=self.parse_fo)
