# -*- coding: utf-8 -*-
import json
import random
import re
import ssl
import urllib
import urllib.request
import pymysql
import scrapy
from scrapy.http import Request, FormRequest
from scrapy.linkextractors import LinkExtractor
from zhider.items import QuestionItem

# Disable SSL certificate verification globally for urllib: the spider fetches
# Zhihu's main.app JS bundle directly with urllib.request.urlopen() below.
ssl._create_default_https_context = ssl._create_unverified_context
# User-Agent pool; getHeader() picks one at random per request.
ua = ["Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36"]


class QuestionSpider(scrapy.Spider):
    """Crawl a Zhihu topic's "hot" listing.

    Yields one QuestionItem per question/answer link and per zhuanlan
    (column) article link found on the topic page, then follows the
    topic's paginated JSON feed via the "next" URL embedded in the page
    state.
    """

    name = 'Question'
    allowed_domains = ['zhihu.com']
    start_urls = ['http://zhihu.com/']
    hash_id = ''
    # Debug switch: when True, pagination stops after the first JSON page.
    test = True
    # Bearer token scraped lazily from Zhihu's main.app JS bundle in parse().
    oauth = ""

    def start_requests(self):
        """Kick off the crawl with a single hard-coded topic listing page."""
        topic_id = '19554169'
        url = "https://www.zhihu.com/topic/%s/hot" % topic_id
        print("start %s/%s topic %s" % (1, 1, topic_id))
        # Not a generator, so returning a list of Requests is valid here.
        return [Request(url, meta={"cookiejar": 1}, headers=self.getHeader(),
                        callback=self.parse)]

    def parse(self, response):
        """Extract item links from the topic HTML page, grab the OAuth token
        if not yet cached, and schedule the first JSON pagination request."""
        topic_id = re.search(r"topic/(\d+)", response.url).group(1)

        for link in LinkExtractor(allow=r'/question/.+/answer/').extract_links(response):
            yield self.getItem(link, topic_id)

        for link in LinkExtractor(allow=r'//zhuanlan.+/p/').extract_links(response):
            # Skip the "report infringement" footer link.
            if link.text == "侵权举报":
                continue
            yield self.getItem(link, topic_id)

        html = response.body.decode("utf-8")

        # Lazily extract the Authorization bearer token from the site's
        # main.app JS bundle the first time through.
        if self.oauth == "":
            oauth_url = re.compile(r"<script src=\"([^\"']+main[.]app[^\"']+js)[\"']").findall(html)[0]
            oauth_js = urllib.request.urlopen(oauth_url).read().decode("utf-8", "ignore")
            self.oauth = re.compile(r"authorization:\"([^\"]+)\"").findall(oauth_js)[0]

        data = response.xpath("//div[@id='data']/@data-state").extract()[0]
        next_url = re.compile(r"\"next\":\"([^\"]+)\"").findall(data)[0]
        if next_url:
            # FIX: parse() is a generator (it yields items above), so
            # "return [Request(...)]" would be swallowed as the generator's
            # StopIteration value and Scrapy would never see the request.
            # It must be yielded.
            yield Request(next_url,
                          meta={"cookiejar": response.meta["cookiejar"]},
                          headers=self.getHeader(response.url, self.oauth),
                          callback=self.nextPage)

    def nextPage(self, response):
        """Handle one page of the topic's paginated JSON feed and follow
        its "next" link (unless self.test is set)."""
        # FIX: Scrapy header values are bytes; decode before matching a
        # str regex against them (str pattern on bytes raises TypeError).
        referer = response.headers["Referer"].decode("utf-8")
        topic_id = re.search(r"topic/(\d+)", referer).group(1)

        for link in LinkExtractor(allow=r'/question/(\d+)').extract_links(response):
            yield self.getItem(link, topic_id)

        for link in LinkExtractor(allow=r'//zhuanlan.+/p/').extract_links(response):
            # Skip the "report infringement" footer link.
            if link.text == "侵权举报":
                continue
            yield self.getItem(link, topic_id)

        payload = json.loads(response.body.decode("utf-8"))
        next_url = payload["paging"]["next"]
        if self.test is not True:
            # FIX: same generator issue as in parse() — the follow-up
            # request must be yielded, not returned.
            yield Request(next_url,
                          meta={"cookiejar": response.meta["cookiejar"]},
                          headers=self.getHeader(referer, self.oauth),
                          callback=self.nextPage)

    def getItem(self, q, topic_id):
        """Build a QuestionItem from an extracted link and its topic id."""
        item = QuestionItem()
        item["topicid"] = topic_id
        item["url"] = q.url
        item["text"] = q.text
        return item

    def getHeader(self, referer="https://www.zhihu.com", oauth=""):
        """Build request headers: random UA, given Referer, and an
        Authorization header when an OAuth token is supplied."""
        h = {"User-Agent": random.choice(ua), "Referer": referer}
        if oauth:
            h["Authorization"] = oauth
        return h