# -*- coding: utf-8 -*-
import scrapy
from ZhiHu.items import ZhihuItem
from scrapy.http import Request,FormRequest
import re
import random
import json
import http.cookiejar
import time
import urllib.request
from lxml import etree
class ZhSpider(scrapy.Spider):
    """Crawl Zhihu's topic plaza.

    Callback pipeline:
      start_requests -> parse        : first-level topic names/ids
      parse          -> alltopics    : sub-topics via the TopicsPlazzaListV2
                                       AJAX endpoint (paged by offset)
      alltopics      -> passagelists : each sub-topic's /hot page
      passagelists   -> passagesinfo : first page of the sub-topic's article
                                       feed (v4 API)
      passagesinfo   -> answerinfo   : answer author/body for Q&A entries
    ZhihuItem instances are yielded at several stages, each stage filling a
    different subset of fields.
    """
    name = 'zh'
    allowed_domains = ['zhihu.com']
    #start_urls = ['http://zhihu.com/']
    # Pool of User-Agent strings; one is picked at random once, at class
    # definition time, and reused for every request.
    uapools = [
        "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.3; W…) Gecko/20100101 Firefox/58.0",
        "Mozilla/4.0(compatible;MSIE7.0;WindowsNT6.0)",
        "Opera/9.80(WindowsNT6.1;U;en)Presto/2.8.131Version/11.11", ]
    header=random.choice(uapools)
    # NOTE(review): this CookieJar is never referenced again in this file;
    # cookie handling below goes through scrapy's meta={"cookiejar": ...}.
    cookiejar = http.cookiejar.CookieJar()
    # Default headers for the form/AJAX requests issued below.
    headers = { "User-Agent":header,
                "Accept": "*/*",
                "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
                "Referer": "https://www.zhihu.com/topic",
                "Connection": "keep-alive",
                "X-Requested-With": "XMLHttpRequest",
                "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                "Host": "www.zhihu.com"
               }
    # NOTE(review): executes at import time, when the class body is evaluated.
    print(headers)
    def start_requests(self):
        """Start the crawl at the topic plaza with scrapy cookie handling on."""
        print("我在爬")  # "crawling"
        return [Request("https://www.zhihu.com/topics", meta={"cookiejar":1},callback=self.parse)]
    '''
    @获取一级话题
    '''
    def parse(self, response):
        """Extract first-level topic names/ids from the plaza page, then post
        to the TopicsPlazzaListV2 endpoint for each topic's sub-topics."""
        print("我进入到知乎话题广场了")  # "entered the Zhihu topic plaza"
        # NOTE(review): the triple-quoted string below is disabled login code,
        # kept verbatim. It contains hard-coded credentials (should not be
        # committed) and a `fromdata=` keyword that would have to be
        # `formdata=` if this were ever re-enabled.
        '''
        data={
                "client_id": "c3cef7c66a1843f8b3a9e6a1e3160e20",
                "grant_type": "password",
                "timestamp": "1522127243584",
                "username":"+8615936342766",
                "password":"wuyue123.",
                "lang": "en",
                "ref_source": "homepage",
                "source": "com.zhihu.web",
                "signature": "ed3d55c22f9f3ed29d0b54f4f6d6de7335f91d69",
                "utm_source": "baidu",
             }
        print("登陆中")
            
        return [FormRequest.from_response(response,
                                          meta={"cookiejar":response.meta["cookiejar"]},
                                          headers=self.headers,
                                          fromdata=data,
                                          callback=self.next,)]
        '''
        item=ZhihuItem()
        # First-level topic names and their numeric ids from the plaza list.
        item["topicName"]=response.xpath('//li[@class="zm-topic-cat-item"]/a/text()').extract()
        item["topicID"]=response.xpath('//li[@class="zm-topic-cat-item"]/@data-id').extract()
        yield item
        # Only the first topic is crawled (range(0, 1)) — presumably limited
        # for debugging; widen the range to crawl more first-level topics.
        for i in range(0, 1):
            topicname=item["topicName"][i]
            ID=item["topicID"][i]
            print(topicname)
            # Form body expected by the AJAX endpoint; the "params" value is
            # itself a JSON-encoded string.
            body={"method":"next",
                  "params":'{"topic_id":' + str(ID) + ',"offset":0,"hash_id":""}',
                  }

            yield FormRequest(url='https://www.zhihu.com/node/TopicsPlazzaListV2',
                                formdata=body,
                                headers=self.headers,
                                meta={"topic_id":str(ID),
                                      "offset":0,
                                      "topic_name":topicname,
                                      "cookiejar": response.meta["cookiejar"]},
                                callback=self.alltopics)

    '''
    @获取所有的子话题
    '''
    def alltopics(self,response):
        """Parse one page of sub-topics of a first-level topic: yield their
        names/ids, request each sub-topic's hot-article page, then re-post
        with an increased offset to page through the remaining sub-topics."""
        print("我在获取所有的子话题")  # "fetching all sub-topics"

        # Top-level topic info carried along through request meta.
        topic_id=response.meta["topic_id"]
        topic_name=response.meta["topic_name"]
        offset=response.meta["offset"]
        # Decode the JSON body into Python objects.
        data=json.loads(response.body)
        print(topic_name)

        item=ZhihuItem()

        # Sub-topic names, regex-scraped from the HTML fragments embedded in
        # the JSON payload (the <img> alt text).
        sub_topic_pat='<img src=".*?" alt="(.*?)">'
        sub_topic=re.compile(sub_topic_pat).findall(str(data))
        #for i in range(0,len(sub_topic)):
        item["sub_topic"]=sub_topic
        time.sleep(2)  # crude rate limiting
        print("subtopic:"+str(item["sub_topic"]))
        '''
        fh = open("E:\\python\\python练习\\" + str(topic_name) + ".txt", "a")
        fh.write(str(sub_topic))
        fh.close()
        '''

        # Sub-topic links.
        sub_href_pat='<a target="_blank" href="(.*?)"'
        sub_href=re.compile(sub_href_pat).findall(str(data))
        #item["sub_href"]=sub_href
        # Sub-topic ids (the path segment after /topic/).
        sid_pat = '<a target="_blank" href="/topic/(.*?)"'
        sub_id = re.compile(sid_pat).findall(str(data))
        item["sub_id"]=sub_id
        print("sub+id"+str(item["sub_id"]))
        print(len(item["sub_id"]))
        yield item

        count=len(sub_topic)
        print(count)
        print(data['r'])
        # Request the articles inside every sub-topic.
        # data['r'] == 0 appears to signal a successful response — TODO confirm.
        if(data['r'])==0:
            for i in range(0,count):
                stopic=sub_topic[i]
                # Build the sub-topic's "hot" page URL.
                shref="https://www.zhihu.com"+str(sub_href[i])+"/hot"
                sid=sub_id[i]
                # Queue processing of the links inside this sub-topic.
                yield FormRequest(url=shref,
                                  meta={'stopic_id':sid,# sub-topic ID
                                        'stopic':stopic,
                                        'topicname':topic_name,
                                        'topic_init': True,
                                        'i':i},
                                  callback=self.passagelists)
        else:
            count=0
        if count > 0:
            print("获取下一页的子ID")  # "fetching next page of sub ids"
            # Advance offset by the number of sub-topics just received and
            # request the next page; paging stops when a page returns none.
            offset = offset + count
            yield FormRequest(url='https://www.zhihu.com/node/TopicsPlazzaListV2',
                                     formdata={"method":"next",
                                                "params":'{"topic_id":' + str(topic_id) + ',"offset":'+str(offset)+',"hash_id":""}'},
                                     meta={"topic_id": topic_id,# top-level topic ID
                                           "offset": offset,
                                           "topic_name": topic_name,
                                           "cookiejar": response.meta["cookiejar"]},
                                     headers={'Referer': 'https://www.zhihu.com/topics'},
                                     callback=self.alltopics)
            #time.sleep(5)

    '''
    @获取文章列表网址
    '''
    def passagelists(self,response):
        """From a sub-topic's /hot page, build the v4 feed API URL for the
        first page of articles and request it."""
        print(response.meta["topicname"]+":"+response.meta["stopic"])
        #time.sleep(5)
        sid=response.meta["stopic_id"]
        spassages_data=response.body
        # Pull the pagination cursor (after_id) embedded in the page.
        next_spassages_pat='after_id=(.*?)&'
        next_spassages_rst=re.compile(next_spassages_pat).findall(str(spassages_data))
        print(next_spassages_rst)
        # NOTE(review): raises IndexError if no after_id is found on the page.
        url='https://www.zhihu.com/api/v4/topics/'+str(sid)+'/feeds/top_activity?&limit=20&after_id='+str(next_spassages_rst[0])
        print(url)
        # To fetch every article, keep requesting the URL found under "next"
        # in response.body; for easier debugging only the first page of 20
        # articles is fetched here.
        yield Request(url,callback=self.passagesinfo,meta={"cookiejar": True},headers={'authorization': 'oauth c3cef7c66a1843f8b3a9e6a1e3160e20'})

    '''
    @获取每篇文章的具体信息
    '''
    def passagesinfo(self,response):
        """Parse the feed JSON: yield title/author for plain articles; for
        Q&A entries yield question info and request the answer page."""
        print("我进入文章列表了")  # "entered the article list"
        item=ZhihuItem()
        data=bytes(response.text,response.encoding).decode("utf-8","ignore")
        #print(data)
        # Each "target" object is one feed entry; cut the JSON text into
        # per-entry chunks first, then mine title/author/type out of each.
        target_pat='{"target":(.*?), "attached_info": ".*?"}'
        target=re.compile(target_pat,re.S).findall(data)
        #print(target)
        passage_title_pat='"url": ".*?", "title": "(.*?)", "excerpt":'
        passage_author_pat='"type": "people", "name": "(.*?)", "is_advertiser"'
        # Q&A detection: an empty match for the "question" field means the
        # entry is a plain article rather than a question/answer.
        passage_type_pat='{"author":.*?, "question": (.*?), "voteup_count"'

        for i in range(0,len(target)):
            passage_author_rst = re.compile(passage_author_pat, re.S).findall(str(target[i]))
            passage_title_rst = re.compile(passage_title_pat, re.S).findall(str(target[i]))
            passage_type_rst = re.compile(passage_type_pat, re.S).findall(target[i])
            if(len(passage_type_rst)==0):
                # Plain article: title, author, type come straight from the chunk.
                passage_type = "article"
                print("是article类型")  # "article type"
                print("文章标题"+str(passage_title_rst))  # "article title"
                print("作者"+str(passage_author_rst))  # "author"
                item["articleTitle"]=passage_title_rst[0]
                item["articleAuthor"]=passage_author_rst[0]
                yield item
                # Article body is omitted here: it would require requesting
                # the entry's "url" value with a further Request.
            else:
                # Q&A entry: grab question title/description, then request the
                # answer page for its content and author.
                print("是问答型")  # "Q&A type"
                question_title_pat='"question_type": .*?"title": "(.*?)", "type": "question"'
                question_excerpt_pat='"id": .*?, "excerpt": "(.*?)", "updated_time"'
                question_title=re.compile(question_title_pat,re.S).findall(target[i])
                question_excerpt=re.compile(question_excerpt_pat,re.S).findall(target[i])
                print("问题标题"+str(question_title[0]))  # "question title"
                print("问题描述"+str(question_excerpt[0]))  # "question description"
                item["questionTitle"]=question_title[0]
                item["questionDetail"]=question_excerpt[0]
                yield item
                time.sleep(3)  # crude rate limiting
                questionid_pat='"type": "question", "id": (.*?)}, "id":'
                answerid_pat='"id": .*?}, "id": (.*?), "excerpt":'
                questionid=re.compile(questionid_pat,re.S).findall(target[i])
                answerid=re.compile(answerid_pat,re.S).findall(target[i])
                print("------------------------------------------------------")
                print("问题ID"+str(questionid[0])+":答案ID"+str(answerid[0]))
                url='https://www.zhihu.com/question/'+str(questionid[0])+'/answer/'+str(answerid[0])
                yield Request(url,callback=self.answerinfo,meta={"cookiejar": True})
                #time.sleep(5)
    '''
    @获取答案的内容和作者
    '''
    def answerinfo(self,response):
        """Scrape the answer author and body fragment from the answer's HTML
        page and yield them as an item."""
        print("我进入到答案网页了")  # "entered the answer page"
        data=response.body
        # Author name lives in the HTML-escaped data-zop JSON attribute.
        answer_author_pat='data-zop="{&quot;authorName&quot;:&quot;(.*?)&quot;'
        answer_content_pat='<span class="RichText CopyrightRichText-richText".*?<p>(.*?)</span>'
        answer_author=re.compile(answer_author_pat,re.S).findall(str(data))
        answer_content=re.compile(answer_content_pat,re.S).findall(str(data))
        item=ZhihuItem()
        # NOTE(review): IndexError if either regex finds no match on the page.
        item["answerAuthor"]=str(answer_author[0])
        item["answerContent"]=str(answer_content[0])
        yield item
# Usage (Windows cmd), from the project directory:
#   E:
#   cd python\python练习\Zhihu
#   scrapy crawl zh >> E:\python\python练习\b.txt