# -*- coding: utf-8 -*-
import re
import time
import urllib
import urllib.parse

import scrapy
from scrapy.http import Request
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities

from zhihu.items import ZhihuItem

class ZhihuSpiderSpider(scrapy.Spider):
    """Crawl Zhihu topic categories, then scrape article and Q&A pages.

    Flow: topic index page -> (PhantomJS renders the JS page and expands each
    top-level topic's sub-topic list) -> topic page -> article ("zhuanlan")
    or question detail page -> yielded ZhihuItem.
    """
    name = 'zhihu_spider'
    allowed_domains = ['zhihu.com']
    start_urls = ['https://www.zhihu.com/topics']
    count = 0  # NOTE(review): never read anywhere in this file; kept for compatibility

    @staticmethod
    def _first(values, default=""):
        """Return the first element of an extracted xpath list, or *default*."""
        return values[0] if values else default

    def parse(self, response):
        """Walk every top-level topic and yield one request per sub-topic."""
        base_url = "https://www.zhihu.com/topics#"
        topics = response.xpath('//li[@class="zm-topic-cat-item"]/a/text()').extract()

        # Iterate over every top-level topic (sports, culture, internet, ...).
        for topic in topics:
            topic_url = base_url + urllib.parse.quote(topic)

            dcap = dict(DesiredCapabilities.PHANTOMJS)
            dcap["phantomjs.page.settings.userAgent"] = (
                "Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 "
                "(KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36"
            )
            browser = webdriver.PhantomJS(desired_capabilities=dcap)
            try:
                browser.get(topic_url)
                time.sleep(3)

                # Click the "更多" (more) link until it disappears, capped at
                # 1000 clicks as a safety bound against an endless feed.
                for _ in range(1000):
                    try:
                        browser.find_element_by_link_text('更多').click()
                        time.sleep(1)
                    except Exception:
                        break

                data = browser.page_source
            finally:
                # BUGFIX: always release the PhantomJS process, even when
                # navigation raises — the old code leaked a browser per error.
                browser.quit()

            categories = re.compile(r'<a target="_blank" href="/topic/(.*?)">', re.S).findall(data)
            names = re.compile(r'<strong>(.*?)</strong>', re.S).findall(data)

            # zip() guards against the two regexes matching different counts;
            # the old index-paired loop raised IndexError on a mismatch.
            for category, sub_name in zip(categories, names):
                # BUGFIX: build a fresh item per request.  The old code mutated
                # one shared ZhihuItem while its reference sat in many pending
                # Request.meta dicts, so every request ultimately carried the
                # fields of the *last* sub-topic.
                items = ZhihuItem()
                items["huati_class"] = topic
                items["huati_name"] = sub_name
                items["huati_tid"] = category
                request = Request("https://www.zhihu.com/topic/" + category,
                                  callback=self.jump2category)
                request.meta['item'] = items
                yield request
            time.sleep(2)

    def jump2category(self, response):
        """From a topic page, yield one request per article / question link."""
        base_item = response.meta['item']
        posts = response.xpath('//a[@data-za-detail-view-element_name="Title"]/@href').extract()
        for post in posts:
            # BUGFIX: copy the item for each post — mutating the shared
            # instance made concurrent requests overwrite each other's fields.
            items = base_item.copy()
            if "//" in post:
                # Protocol-relative URL -> zhuanlan (column) article.
                post_url = "https:" + post
                items["types"] = "article"
                items["link"] = post_url
                request = scrapy.Request(post_url, callback=self.postZhuanLan)
            else:
                # Site-relative URL -> question page.
                post_url = "https://www.zhihu.com" + post
                items["types"] = "question"
                items["link"] = post_url
                request = scrapy.Request(post_url, callback=self.postWenDa)
            request.meta['item'] = items
            yield request

    def postZhuanLan(self, response):
        """Parse a zhuanlan (column) article page into the carried item."""
        items = response.meta['item']
        title = response.xpath('//title[@data-react-helmet="true"]/text()').extract()
        author = response.xpath('//a[@class="PostIndex-authorName"]/text()').extract()
        paragraphs = response.xpath(
            '//div[@class="RichText PostIndex-content av-paddingSide av-card"]/p/text()'
        ).extract()

        # BUGFIX: guard empty extractions — title[0] / author[0] raised
        # IndexError whenever the page markup changed or failed to load.
        items["title"] = self._first(title)
        items["author"] = self._first(author)
        items["content"] = "".join(paragraphs)
        items["question"] = ""
        yield items

    def postWenDa(self, response):
        """Parse a question-and-answer page into the carried item."""
        items = response.meta['item']
        title = response.xpath('//h1[@class="QuestionHeader-title"]/text()').extract()
        question = response.xpath('//span[@class="RichText"]/text()').extract()
        author = response.xpath('//a[@data-za-detail-view-element_name="User"]/text()').extract()
        content = response.xpath('//span[@class="RichText CopyrightRichText-richText"]/text()').extract()

        # BUGFIX: same empty-extraction guard as postZhuanLan.
        items["title"] = self._first(title)
        items["author"] = self._first(author)
        items["content"] = self._first(content)
        items["question"] = self._first(question)
        yield items
