# -*- coding: utf-8 -*-
import time

import scrapy
from scrapy.exceptions import CloseSpider


class CsaiAsksSpider(scrapy.Spider):
    """Spider for the csai.cn insurance Q&A section.

    Crawls the question list at https://www.csai.cn/wenda/baoxian/, follows
    each question's detail page, and recursively follows "related question"
    links up to a fixed depth.

    Spider argument:
        t: target date string in '%Y-%m-%d' format; defaults to today's date.
           When set, list pagination is capped at MAX_LIST_PAGES pages.
    """

    # Let 403 responses reach the callbacks so the spider can shut down
    # cleanly instead of Scrapy silently dropping them.
    handle_httpstatus_list = [403]
    name = 'csai_asks'
    allowed_domains = ['csai.cn']

    BASE_URL = "https://www.csai.cn"
    MAX_LIST_PAGES = 3      # list pages to follow when a target date is set
    MAX_RELATED_DEPTH = 5   # recursion depth for related-question links

    def __init__(self, t=None, *args, **kwargs):
        """Initialize the spider.

        :param t: optional target date string ('%Y-%m-%d'). Falls back to
                  the current local date when not supplied.
        """
        super(CsaiAsksSpider, self).__init__(*args, **kwargs)
        # NOTE(review): self.t is always truthy after __init__ (it defaults
        # to today's date), so the "crawl every page" branch in parse() is
        # only reachable if t is passed as an empty string. Preserved as-is
        # for backward compatibility.
        self.t = t if t else time.strftime('%Y-%m-%d', time.localtime())

    def start_requests(self):
        """Seed the crawl with the first Q&A list page."""
        yield scrapy.Request(
            url="https://www.csai.cn/wenda/baoxian/",
            callback=self.parse,
            # dont_filter: the start URL may be revisited across runs.
            dont_filter=True,
        )

    def parse(self, response):
        """Parse a question-list page: yield detail requests and paginate.

        :raises CloseSpider: on HTTP 403 (anti-scraping block).
        """
        if response.status == 403:
            print("被反扒，程序结束！！！！")
            raise CloseSpider("blocked by anti-scraping (HTTP 403)")

        # N is the 1-based count of list pages visited along this chain.
        N = response.meta.get("N", 0) + 1

        asks_list = response.xpath("//ul[@class='qbwt_list']/li")
        print(len(asks_list))
        for asks in asks_list:
            href = asks.xpath("./h3/a/@href").extract_first()
            if not href:
                # Defensive: a malformed <li> without a link would otherwise
                # raise TypeError on string concatenation.
                continue
            ask_url = self.BASE_URL + href
            ask_time = asks.xpath("./h3/span/span/text()").extract_first()
            item = dict(
                ask_time=ask_time,
                from_url=ask_url,
            )
            yield scrapy.Request(
                url=ask_url,
                callback=self.parse1,
                meta={"item": item},
            )

        # Pagination: capped at MAX_LIST_PAGES when a target date is set,
        # unbounded otherwise.
        if self.t and N >= self.MAX_LIST_PAGES:
            print("翻页结束！！！！！")
            return
        next_url = response.xpath("//a[text()=' > ']/@href").extract_first()
        if next_url:
            yield scrapy.Request(
                url=self.BASE_URL + next_url,
                callback=self.parse,
                meta={"N": N},
                dont_filter=True,
            )
        elif not self.t:
            print("所有翻页结束！！！")

    def parse1(self, response):
        """Parse a question detail page and follow related questions.

        Yields one item per page, then (up to MAX_RELATED_DEPTH levels deep)
        requests for each link in the "related questions" box.

        :raises CloseSpider: on HTTP 403 (anti-scraping block).
        """
        if response.status == 403:
            print("被反扒，程序结束！！！！")
            raise CloseSpider("blocked by anti-scraping (HTTP 403)")

        # level is the 1-based recursion depth through related-question links.
        level = response.meta.get("level", 0) + 1

        item = response.meta.get("item")
        item["ask_title"] = response.xpath(
            "//div[@class='wd_title']/h1/text()").get()
        item["ask_content"] = ''.join(
            response.xpath("//div[@class='wd_title']/p//text()").getall())
        # The answer list always holds exactly one joined string, so it is
        # always truthy; serialize it with str() as the pipeline expects.
        ans_list = [''.join(
            response.xpath("//div[@class='wd_cn']/p//text()").getall())]
        item["ans_list"] = str(ans_list)
        item["from_type"] = 7
        yield item

        print("当前相关等级>>>>>", level)
        if level >= self.MAX_RELATED_DEPTH:
            print("相关问题抓取完成！！！！")
            return

        # The related-questions box is identified by its stripped header text.
        header = ''.join(
            i.strip() for i in response.xpath(
                "//div[@class='tlwt_box']/div[@class='llg_wt']/text()").getall())
        if header == '相关问题推荐':
            print("有相关问题>>>>>>>>>>>>>>>>")
            for href in response.xpath(
                    "//dl[@class='lswt_box']/dt/a/@href").getall():
                if not href:
                    continue
                url = self.BASE_URL + href
                # NOTE(review): related-question items carry only from_url
                # here (no ask_time), so downstream consumers must treat
                # ask_time as optional — confirm against the pipeline.
                yield scrapy.Request(
                    url=url,
                    callback=self.parse1,
                    meta={"item": dict(from_url=url), "level": level},
                )