# -*- coding: utf-8 -*-
import re
import time
import urllib.parse
import urllib.request

import scrapy
from scrapy.http import Request, FormRequest
from scrapy.linkextractors import LinkExtractor

from zhihu.items import ZhihuItem

class ZhSpider(scrapy.Spider):
    """Crawl Zhihu's topic plaza: first-level topic categories and the
    second-level topics listed under each of them.

    Flow:
      1. ``start_requests``     -- GET https://www.zhihu.com/topics
      2. ``get_all_1st_topics`` -- parse the first-level category list, then
         POST the AJAX paging endpoint once per page for every category
      3. ``get_all_2nd_topics`` -- parse one page of second-level topics and
         yield one ``ZhihuItem`` (id / name / url) per topic
    """

    name = 'zh'
    allowed_domains = ['zhihu.com']
    # start_urls = ['https://www.zhihu.com/topics']

    headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36"}

    # --- first-level (category) topic state --------------------------------
    # NOTE(review): these are class attributes and therefore shared between
    # spider instances; Scrapy runs one instance per crawl, so in practice
    # they behave like instance state.
    name_tp1st_all = []
    id_tp1st_all = []
    # One flag per category: True once all of its 2nd-level pages are fetched.
    isover_tp1st_all = []
    offset = ""
    hash_id = ""
    cnt_tp1st = 0

    # --- second-level topic state (only read by print_tp_list) -------------
    name_tp2nd_all = []
    id_tp2nd_all = []
    url_tp2nd_all = []
    cnt_tp2nd = 0

    def start_requests(self):
        """Kick off the crawl at the topic plaza page (cookie jar enabled)."""
        topics_hpg = "https://www.zhihu.com/topics"
        return [Request(topics_hpg, meta={"cookiejar": 1}, headers=self.headers, callback=self.get_all_1st_topics)]

    def get_all_1st_topics(self, response):
        """Parse every first-level topic, then schedule the paged POST
        requests that list the second-level topics under each of them."""
        print("正在解析一级话题 ......")
        self.name_tp1st_all = response.xpath("//li[@class='zm-topic-cat-item']/a/text()").extract()
        self.id_tp1st_all = response.xpath("//li[@class='zm-topic-cat-item']/@data-id").extract()
        self.offset = self.get_offset(response)
        self.hash_id = self.get_hash_id(response)

        # Names and ids come from two separate XPath queries; both must be
        # non-empty and pair up one-to-one before we can proceed.
        if not (self.name_tp1st_all and self.id_tp1st_all
                and len(self.name_tp1st_all) == len(self.id_tp1st_all)):
            print("获取一级话题出错，数量为0")
            return

        self.cnt_tp1st = len(self.name_tp1st_all)
        print("共找到 " + str(self.cnt_tp1st) + " 个一级话题,开始爬取各一级话题下的二级话题 ......")
        self.print_tp_list(1)

        self.init_isover_tp1st_all()

        # Rough upper bound on 2nd-level topics per category; the loop stops
        # earlier once a short page marks the category as finished.
        limit = 2000
        page_size = 20
        for i in range(self.cnt_tp1st):
            for page in range(limit // page_size):
                # Stop as soon as an earlier response flagged this category
                # as fully crawled.
                if self.get_isover_from_id_tp1st(self.id_tp1st_all[i]):
                    print("一级话题【" + str(self.name_tp1st_all[i]) + "】已经爬取完")
                    break
                form_data = self.get_form_data(self.id_tp1st_all[i], page * page_size, self.hash_id)
                # The endpoint only answers POSTs carrying this form, so
                # FormRequest.from_response is unusable (a bare GET of the
                # URL returns nothing) and plain Request has no body field.
                yield FormRequest("https://www.zhihu.com/node/TopicsPlazzaListV2",
                                  headers=self.headers,
                                  # Same URL each time but different form
                                  # data, so the dupe filter need not be
                                  # disabled.
                                  formdata=form_data,
                                  callback=self.get_all_2nd_topics,
                                  meta={"cookiejar": True})
                # NOTE(review): time.sleep blocks Scrapy's reactor; the
                # DOWNLOAD_DELAY setting is the proper throttle. Kept to
                # preserve the original pacing.
                time.sleep(1)

    def init_isover_tp1st_all(self):
        """(Re)create the per-category completion flags, all False.

        Assignment rather than append so that a re-parse cannot grow the
        list beyond ``cnt_tp1st`` entries.
        """
        self.isover_tp1st_all = [False] * self.cnt_tp1st

    def get_all_2nd_topics(self, response):
        """Parse one page of second-level topics and yield one item each."""
        # Recover which category/page this response answers from the POSTed
        # form body of the originating request.
        request_body_data = urllib.parse.unquote(response.request.body.decode("utf-8"))
        id_tp1st = re.findall(r'topic_id":(.*?),"offset', request_body_data)
        offset_tp1st = re.findall(r'offset":(.*?),"hash_id', request_body_data)

        # Guard BEFORE indexing id_tp1st[0] (the original indexed first and
        # would raise IndexError on a non-matching body).
        if not (id_tp1st and offset_tp1st):
            print("提取一级话题 id 出错")
            return

        name_tp1st = self.get_name_from_id_tp1st(id_tp1st[0])
        print("正在获取一级话题【" + str(name_tp1st) + "】下的二级话题 .......")
        data = response.body.decode("utf-8", "ignore")
        # The payload is HTML embedded in a JSON string, hence the loose
        # regexes instead of XPath.
        name_tp2nd_all = re.findall(r'<strong>(.*?)<..strong>', data, re.S)
        id_tp2nd_all = re.findall(r'"..topic..(\d{1,})..>', data, re.S)

        if not (name_tp2nd_all and id_tp2nd_all and len(name_tp2nd_all) == len(id_tp2nd_all)):
            print("获取一级话题【" + str(name_tp1st) + "】下的二级话题出错,可能已经提取完 ")
            self.set_isover_from_id_tp1st(id_tp1st[0], True)
            return

        print("\t一级话题【" + str(name_tp1st) + "】下第【" + str(int(offset_tp1st[0]) // 20 + 1) + "】次加载，本次找到 " + str(len(name_tp2nd_all)) + " 个二级话题，正在存入 ......")
        # A short page means the category has no further pages; flag it so
        # the scheduler in get_all_1st_topics stops requesting it.
        if len(name_tp2nd_all) < 10:
            print("\t\t当前已是最后一页，只提取到 " + str(len(name_tp2nd_all)) + " 项，一级话题【" + str(name_tp1st) + "】下的二级话题已经爬取完")
            self.set_isover_from_id_tp1st(id_tp1st[0], True)

        for i in range(len(name_tp2nd_all)):
            try:
                # Names may arrive as "\uXXXX" escape sequences inside the
                # JSON payload; decode them without eval() (the original
                # eval("u'...'") executed scraped text as Python source --
                # a code-injection hole that also broke on quotes).
                name = self._decode_escaped(name_tp2nd_all[i])
                url = "https://www.zhihu.com/topic/" + str(id_tp2nd_all[i])

                item = ZhihuItem()
                item["id"] = id_tp2nd_all[i]
                item["name"] = name
                item["url"] = url
                yield item
            except Exception as err:
                # Best-effort per item: log and continue with the rest.
                print("错误：" + str(err))

    @staticmethod
    def _decode_escaped(text):
        r"""Decode literal ``\uXXXX`` escape sequences in *text* into real
        characters; already-decoded characters pass through unchanged
        (non-latin-1 chars are backslash-escaped first, then re-decoded).
        """
        return text.encode("latin-1", "backslashreplace").decode("unicode_escape")

    def count_cnt(self, response):
        """Debug callback: dump the second-level topic list collected so far."""
        self.print_tp_list(2)

    def print_tp_list(self, grade):
        """Pretty-print the first-level (grade == 1) or second-level
        (grade == 2) topic list. Other grades print nothing.

        The original printed ``cnt + 1`` in the header, over-reporting the
        item count by one; fixed to ``cnt``.
        """
        if grade == 1:
            cnt = self.cnt_tp1st
            print("\n=========== 打印 一级 话题 ============，共有【" + str(cnt) + "】项")
            for i in range(cnt):
                print("名称：%-30s  id: %-10s" % (self.name_tp1st_all[i], self.id_tp1st_all[i]))
        elif grade == 2:
            cnt = self.cnt_tp2nd
            print("\n=========== 打印 二级 话题 ============，共有【" + str(cnt) + "】项")
            for i in range(cnt):
                try:
                    print("名称：%-30s  id: %-10s  url: %-40s" % (self.name_tp2nd_all[i], self.id_tp2nd_all[i], self.url_tp2nd_all[i]))
                except IndexError:
                    # cnt_tp2nd can run ahead of the url list; keep going.
                    print("列表越界：i=" + str(i))

    def get_name_from_id_tp1st(self, id):
        """Map a first-level topic id back to its display name."""
        return self.name_tp1st_all[self.id_tp1st_all.index(id)]

    def get_name_from_id_tp2nd(self, id):
        """Map a second-level topic id back to its display name."""
        return self.name_tp2nd_all[self.id_tp2nd_all.index(id)]

    def get_isover_from_id_tp1st(self, id):
        """Return True when the category with *id* has been fully crawled."""
        return self.isover_tp1st_all[self.id_tp1st_all.index(id)]

    def set_isover_from_id_tp1st(self, id, value):
        """Record whether the category with *id* has been fully crawled."""
        self.isover_tp1st_all[self.id_tp1st_all.index(id)] = value

    def get_offset(self, response):
        """Extract the initial "offset" value embedded in the plaza page,
        or None (after logging) when it cannot be found."""
        offset = re.findall(r"offset.*?: (\d{1,}),", response.body.decode("utf-8", "ignore"))
        if offset:
            return offset[0]
        print("获取参数 offset 失败")

    def get_hash_id(self, response):
        """Extract the "hash_id" value embedded in the plaza page, or None
        (after logging) when it cannot be found."""
        hash_id = re.findall(r"hash_id.*?: (.*?)},", response.body.decode("utf-8", "ignore"))
        if hash_id:
            return hash_id[0]
        print("获取参数 hash_id 失败")

    def get_form_data(self, topic_id, offset, hash_id):
        """Build the POST form for the second-level topic paging endpoint.

        ``hash_id`` is accepted for interface compatibility but deliberately
        sent empty -- the endpoint answers without it.
        """
        return {
            "method": "next",
            "params": '{"topic_id":' + str(topic_id) + ',"offset":' + str(offset) + ',"hash_id":""}',
        }

    def parse(self, response):
        """Unused: every scheduled request carries an explicit callback."""
        pass

