# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request,FormRequest
import base64
import re
import json
from zhihu.items import ZhihuItem
import urllib.request
import execjs
class ZhSpider(scrapy.Spider):
    """Spider that logs in to zhihu.com (handling an optional captcha) and
    scrapes topic feed articles, paginating through each topic category.

    Request flow:
        start_requests -> judge_captcha -> [dealCaptcha] -> login_suc
        -> get_topic -> requestList (self-paginating via TopicFeedList).
    """

    name = 'zh'
    allowed_domains = ['zhihu.com']
    start_urls = ['https://www.zhihu.com']

    # Shared request headers; 'X-Xsrftoken' is refreshed in requestList()
    # once a fresh _xsrf token is scraped from a page.
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',
              'Referer': 'https://www.zhihu.com/signup',
              'x-xsrftoken': 'a413e66d-63a7-43e2-925f-4ab213908d88',
              "Host": 'www.zhihu.com',
              'origin': 'https://www.zhihu.com',
              'Connection': 'keep-alive',
              "Authorization": 'oauth c3cef7c66a1843f8b3a9e6a1e3160e20'
              }

    # SECURITY NOTE: account credentials, signature and timestamp are
    # hard-coded here (as in the original); move them to settings or
    # environment variables before sharing this code.
    # Single source of truth for the sign-in form — previously this dict was
    # duplicated verbatim in judge_captcha() and dealCaptcha().
    LOGIN_DATA = {
        "username": "+8613661253075",
        "password": "zYh1992.",
        "grant_type": "password",
        "lang": "en",
        "timestamp": "1523756998227",
        "source": "com.zhihu.web",
        "signature": "08cc965bdf3434769c1ed9ef1550daa7d979d656",
        "client_id": "c3cef7c66a1843f8b3a9e6a1e3160e20",
        "ref_source": "homepage",
    }

    topicDataList = []

    # https://www.zhihu.com/signup?next=%2F
    # http://www.zhihu.com/api/v3/oauth/sign_in
    def start_requests(self):
        """Start the login flow by asking the API whether a captcha is needed."""
        yield scrapy.Request(
            url="https://www.zhihu.com/api/v3/oauth/captcha?lang=en",
            meta={"cookiejar": 1},
            dont_filter=True,
            callback=self.judge_captcha,
            headers=self.header
        )

    def judge_captcha(self, response):
        """Branch on the captcha check: fetch the captcha image, or sign in directly."""
        print(response.body)
        show = json.loads(response.body)["show_captcha"]
        if show:
            print("有验证码")
            # A PUT on the captcha endpoint makes the server generate the image.
            yield scrapy.Request(
                url='https://www.zhihu.com/api/v3/oauth/captcha?lang=en',
                headers=self.header,
                dont_filter=True,
                method="PUT",
                meta={"cookiejar": response.meta["cookiejar"]},
                callback=self.dealCaptcha
            )
        else:
            print("没有验证码")
            yield scrapy.FormRequest(
                url='https://www.zhihu.com/api/v3/oauth/sign_in',
                headers=self.header,
                meta={"cookiejar": response.meta["cookiejar"]},
                formdata=dict(self.LOGIN_DATA),  # copy so the class constant stays pristine
                callback=self.login_suc
            )

    def dealCaptcha(self, response):
        """Decode the base64 captcha image to disk, prompt the operator for the
        answer, then submit the sign-in form with the captcha attached."""
        pic = json.loads(response.body)["img_base64"]
        # with-block guarantees the handle is closed even if the write fails
        # (the original open/close pair leaked on error).
        with open("C:/Users/admin/Desktop/yzm.jpg", "wb") as file:
            file.write(base64.b64decode(pic))
        cap = input("请到桌面查看验证码图片，并输入")

        data = dict(self.LOGIN_DATA)
        data["captcha"] = cap
        print("登陆中")
        print(data)

        yield scrapy.FormRequest(
            url='https://www.zhihu.com/api/v3/oauth/sign_in',
            headers=self.header,
            formdata=data,
            meta={"cookiejar": response.meta["cookiejar"]},
            callback=self.login_suc
        )

    def login_suc(self, response):
        """After a successful sign-in, request the topic overview page.

        Paging payload shapes observed against /node/TopicFeedList:
        {"offset":0,"topic_id":68,"feed_type":"smart_feed"}
        {"offset":4483.20729,"topic_id":68,"feed_type":"smart_feed"}
        """
        yield scrapy.Request(
            "https://www.zhihu.com/topic",
            meta={"cookiejar": response.meta["cookiejar"]},
            headers=self.header,
            # NOTE(review): URL is pure ASCII so this encoding has no visible
            # effect — kept from the original, probably removable.
            encoding="gb2312",
            callback=self.get_topic
        )

    # Request every topic category.
    def get_topic(self, response):
        """Extract each topic-category data-id and request its first feed page."""
        for topic_id in response.xpath('//li[@class="zm-topic-cat-item"]/@data-id').extract():
            yield scrapy.Request(
                "https://www.zhihu.com/topic",
                dont_filter=True,
                meta={'topicid': topic_id,
                      'offset': '0',
                      'cookiejar': response.meta['cookiejar']},
                headers=self.header,
                callback=self.requestList
            )

    # Fetch the article list of one category page.
    def requestList(self, response):
        """Parse one feed page into a ZhihuItem, then keep paging through the
        TopicFeedList ajax endpoint until no more feed items come back."""
        print("requestList")
        item = ZhihuItem()

        item["title"] = response.xpath("//a[@class='question_link']/text()").extract()
        item["href"] = response.xpath("//a[@class='question_link']/@href").extract()
        item["writer"] = response.xpath("//a[@class='author-link']/text()").extract()
        item["content"] = response.xpath("//textarea[@class='content']/text()").extract()

        if item["title"]:
            yield item
        else:
            # Nothing matched — dump the page for manual inspection.
            with open("C:/Users/admin/Desktop/empy.html", "w") as file:
                file.write(response.body.decode("utf-8"))
        print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")

        xsrf = response.xpath("//input[@name='_xsrf']/@value").extract()
        if xsrf:
            # Refresh the CSRF token so the paging POST below is accepted.
            self.header['X-Xsrftoken'] = xsrf[0]

        # The data-score of the last item on the page becomes the next offset.
        score = response.xpath("//div[@class='feed-item feed-item-hook  folding']/@data-score").extract()
        if score:
            data = {
                "method": "next",
                "params": '{"offset": ' + response.meta['offset'] + ',"topic_id": ' + response.meta['topicid'] + ',"feed_type": "smart_feed"}'
            }
            # Request the next page of this topic's feed.
            print("请求下一页")
            yield scrapy.FormRequest(
                url='https://www.zhihu.com/node/TopicFeedList',
                headers=self.header,
                meta={'topicid': response.meta['topicid'], 'offset': score[-1]},
                formdata=data,
                callback=self.requestList
            )