# -*- coding: utf-8 -*-
import json
import re
import time

import scrapy
from scrapy.exceptions import CloseSpider
import logging

class SogouAsksSpider(scrapy.Spider):
    """Crawl Q&A threads from wenwen.sogou.com for tag 179798.

    Flow:
      * ``parse``     -- a listing page: queues every question page found,
        then the next listing page (the ``N`` meta counter tracks the page
        number; with a date filter at most 6 follow-up pages are fetched).
      * ``parse1``    -- a single question page: yields the scraped item,
        then queues the "related questions" JSONP endpoint, recursing up
        to 3 levels deep via the ``level`` meta counter.
      * ``xiangguan`` -- the JSONP related-questions response: queues each
        related question back through ``parse1``.

    A 403 response anywhere is treated as an anti-crawling block and stops
    the spider via ``CloseSpider``.
    """

    # Let 403 responses reach the callbacks so we can shut the spider down
    # deliberately instead of Scrapy silently dropping them.
    handle_httpstatus_list = [403]
    name = 'sogou_asks'
    allowed_domains = ['sogou.com']

    def __init__(self, t=None, *args, **kwargs):
        """``t`` enables the bounded-pagination mode (spider argument).

        When omitted it defaults to today's local date (``YYYY-MM-DD``),
        which keeps ``self.t`` truthy — the unbounded-pagination branch in
        ``parse`` only runs if an explicitly falsy ``t`` is ever passed.
        """
        super(SogouAsksSpider, self).__init__(*args, **kwargs)
        self.t = t if t else time.strftime('%Y-%m-%d', time.localtime(time.time()))

    def start_requests(self):
        """Seed the crawl with the first listing page of tag 179798."""
        url = "https://wenwen.sogou.com/cate/tag?tag_id=179798"
        yield scrapy.Request(
            url=url,
            callback=self.parse,
            dont_filter=True,
        )

    def parse(self, response):
        """Parse a listing page: queue question pages and the next page.

        Raises ``CloseSpider`` on HTTP 403 (anti-crawling block).
        """
        if response.status == 403:
            # Blocked by anti-crawling measures — stop the whole spider.
            logging.info("被反扒，程序结束!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            print("被反扒，程序结束！！！！")
            raise CloseSpider("blocked by anti-crawling (HTTP 403)")

        # Page counter carried through meta: absent on the seed page -> 1.
        N = (response.meta.get("N") or 0) + 1

        # A <div class='sort-lst'> with text is the empty-result marker;
        # its presence means there is nothing left to crawl.
        m = response.xpath("//div[@class='sort-lst']//text()").get()
        if m:
            print("抓取完成！！！！！！！！")
            return

        asks_list = response.xpath("//ul[@class='sort-lst']/li")
        print(len(asks_list))
        for asks in asks_list:
            href = asks.xpath("./a/@href").get()
            if not href:
                # Defensive: a malformed <li> without a link would otherwise
                # crash the string concatenation below.
                continue
            ask_url = "https://wenwen.sogou.com" + href
            item = dict(
                from_url=ask_url
            )
            yield scrapy.Request(
                url=ask_url,
                callback=self.parse1,
                meta={"item": item},
            )

        # Pagination. With a date filter, stop after page 6.
        next_url = "https://wenwen.sogou.com/cate/tag?tag_id=179798&tp=0&pno={}&ch=ww.fly.fy3".format(N)
        if self.t:
            if N >= 7:
                print("翻页结束！！！！！")
                return
            print(next_url)
        yield scrapy.Request(
            url=next_url,
            callback=self.parse,
            meta={"N": N},
            dont_filter=True,
        )

    def parse1(self, response):
        """Parse one question page: yield the item, queue related questions.

        The yielded item carries: ``from_url``, ``ask_title``,
        ``ask_content``, ``ans_list`` (stringified list of answer texts)
        and ``from_type`` (constant 6 tagging this source).
        Raises ``CloseSpider`` on HTTP 403.
        """
        if response.status == 403:
            # Blocked by anti-crawling measures — stop the whole spider.
            logging.info("被反扒，程序结束!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            print("被反扒，程序结束！！！！")
            raise CloseSpider("blocked by anti-crawling (HTTP 403)")

        # Recursion depth for related questions: absent on direct hits -> 1.
        level = (response.meta.get("level") or 0) + 1

        item = response.meta.get("item")
        item["ask_title"] = response.xpath("//h1[@id='question_title']/span/text()").get()
        ask_content = response.xpath("//div[@id='question_content']/pre//text()").getall()
        item["ask_content"] = ''.join(ask_content) if ask_content else ''

        # Collect answer texts: the accepted answer block first, then the
        # regular replies (same extraction for both selectors).
        item["ans_list"] = []
        for selector in (
            "//div[@id='bestAnswers']//pre",
            "//div[@class='replay-wrap common_answers']//pre",
        ):
            for ans in response.xpath(selector):
                text = ''.join(ans.xpath(".//text()").getall())
                if text:
                    item["ans_list"].append(text)
        # Downstream expects a string, not a list (kept for compatibility).
        item["ans_list"] = str(item["ans_list"])
        item["from_type"] = 6
        yield item

        print("相关问题的等级是：>>>>>>>>>", level)
        if level >= 3:
            print("相关问题采集完成！！！！！！！！！")
            return

        # Related questions: extract the question id from the URL and hit
        # the suggestion endpoint (JSONP, parsed in ``xiangguan``).
        qid_match = re.search(r"\d+", response.url)
        if not qid_match:
            # Defensive: no numeric question id in the URL — nothing to ask.
            return
        xiang_url = "https://wenwenapi.sogou.com/suggsearch/service-search/ajax/q-related?qid={}&title={}&callback=jQuery_f93f0366966d85be58cfa43822c9811d"
        yield scrapy.Request(
            url=xiang_url.format(qid_match.group(), item["ask_title"]),
            callback=self.xiangguan,
            meta={"level": level},
        )
        print("有相关问题>>>>>>>>>>>>>>>>")

    def xiangguan(self, response):
        """Parse the JSONP related-questions payload; queue each question.

        The body looks like ``callback([...])``; the JSON array inside the
        parentheses lists related questions with an ``id`` field.
        """
        level = response.meta.get("level")
        ret_txt = re.search(r"\((.*)\)", response.text)
        if not ret_txt:
            return
        for entry in json.loads(ret_txt.group(1)):
            url = "https://wenwen.sogou.com/z/q{}.htm?ch=ww.xqy.xgzs".format(entry.get("id"))
            item = dict(
                from_url=url
            )
            yield scrapy.Request(
                url=url,
                callback=self.parse1,
                meta={"item": item, "level": level},
            )