# -*- coding: utf-8 -*-
import json
import time

from scrapy.exceptions import CloseSpider
import scrapy
import logging

class SinaAsksSpider(scrapy.Spider):
    """Crawl questions and answers from iask.sina.com.cn (category 159).

    Flow:
      * ``parse``  -- walks the paginated question list (capped at
        ``MAX_LIST_PAGES`` pages) and requests each question's detail page.
      * ``parse1`` -- extracts title / question body / answers from a detail
        page (both the new and the legacy page layouts are handled) and, up
        to ``MAX_SIMILAR_LEVEL`` levels deep, follows the "similar question"
        and "insurance knowledge" links found on that page.

    Spider argument:
        t: optional date string; defaults to today's date in '%Y-%m-%d'
           format, so ``self.t`` is always a non-empty string.

    Any 403 response is treated as an anti-bot ban and closes the spider
    via ``CloseSpider``.
    """

    # Let 403 responses reach the callbacks instead of being dropped by the
    # HttpError middleware, so a ban can be detected and the spider stopped.
    handle_httpstatus_list = [403]
    name = 'sina_asks'
    allowed_domains = ['iask.sina.com.cn']

    BASE_URL = "https://iask.sina.com.cn"
    MAX_LIST_PAGES = 14      # cap on list pagination
    MAX_SIMILAR_LEVEL = 2    # depth cap for following related-question links

    def __init__(self, t=None, *args, **kwargs):
        super(SinaAsksSpider, self).__init__(*args, **kwargs)
        # Fall back to today's date; self.t is therefore always truthy, which
        # is why parse() applies the page cap unconditionally.
        self.t = t if t else time.strftime('%Y-%m-%d', time.localtime())

    def start_requests(self):
        """Kick off the crawl with the first list page of category 159."""
        yield scrapy.Request(
            url=self.BASE_URL + "/c/159.html",
            callback=self.parse,
            dont_filter=True,
        )

    def parse(self, response):
        """Parse one question-list page.

        Yields a detail-page request per question, then follows the
        next-page link until ``MAX_LIST_PAGES`` pages have been visited.
        """
        if response.status == 403:
            self._abort_banned()
        # 1-based count of list pages visited so far, carried through meta.
        page_no = response.meta.get("N", 0) + 1
        rows = response.xpath("//div[@class='iask-list-con']/ul/li")
        self.logger.debug("list page %s: %s questions", page_no, len(rows))
        for row in rows:
            title = row.xpath("./div/div[@class='question-title fl']/a/text()").get(default='')
            url = self.BASE_URL + row.xpath("./div/div[@class='question-title fl']/a/@href").get()
            asked_at = row.xpath("./div/div[@class='question-other fr']/span[2]/text()").get()
            item = dict(ask_title=title, ask_time=asked_at, from_url=url)
            yield scrapy.Request(url=url, callback=self.parse1, meta={"item": item})

        # Next list page.  NOTE: the original un-capped branch was dead code
        # because self.t is always set; the cap applies unconditionally.
        if page_no < self.MAX_LIST_PAGES:
            next_href = response.xpath("//a[text()='下一页']/@href").get()
            if next_href:
                yield scrapy.Request(
                    url=self.BASE_URL + next_href,
                    callback=self.parse,
                    meta={"N": page_no},
                    dont_filter=True,
                )
        else:
            self.logger.info("翻页结束！！！！！")

    def parse1(self, response):
        """Parse a question detail page and yield the assembled item.

        Also follows "similar question" / "insurance knowledge" links up to
        ``MAX_SIMILAR_LEVEL`` levels deep (depth tracked via meta['level']).
        """
        if response.status == 403:
            self._abort_banned()
        # Depth of this detail page within the related-question chain.
        level = response.meta.get("level", 0) + 1
        item = response.meta.get("item")
        self._fill_title(response, item)
        item["ask_content"] = self._extract_content(response)
        item["ans_list"] = self._extract_answers(response)
        item["from_type"] = 5
        yield item
        if level < self.MAX_SIMILAR_LEVEL:
            for request in self._related_requests(response, level):
                yield request
        else:
            self.logger.debug("类似问题或相关知识抓取完成！！！！")

    # ------------------------------------------------------------------
    # internal helpers
    # ------------------------------------------------------------------

    def _abort_banned(self):
        """Log the anti-bot ban and stop the whole crawl."""
        logging.info("被反扒，程序结束！！！！！！！！！！！！！")
        raise CloseSpider("banned")

    def _fill_title(self, response, item):
        """Backfill an empty title from the detail page (two layouts)."""
        if not item["ask_title"]:
            title = (
                response.xpath("//h1[@class='question-title ']/text()").get()
                or response.xpath("//h1[@class='problem-title-con']/p/text()").get()
            )
            if title:
                item["ask_title"] = title

    def _extract_content(self, response):
        """Return the question body text (new layout first, then legacy)."""
        parts = response.xpath("//pre[@class='question-text']/text()").getall()
        if not parts:
            parts = response.xpath("//pre[@class='problem-text']/text()").getall()
        return ''.join(parts)

    def _extract_answers(self, response):
        """Collect every answer on the page; '' when there are none.

        NOTE: the non-empty result is the *stringified* Python list
        (``str(list)``), kept for compatibility with the downstream
        pipeline that consumed the original implementation's output.
        """
        answers = []
        # New layout: one "best" answer plus a list of other answers.
        best = response.xpath("//div[@class='new-goods-answer']/ul/li/div/pre/text()").get()
        if best:
            answers.append(best)
        for node in response.xpath("//div[@class='new-other-answer answer_list']/ul/li"):
            text = ''.join(node.xpath(".//pre//text()").getall())
            if text:
                answers.append(text)
        if answers:
            return str(answers)
        # Legacy page layout uses a different answer markup.
        good = response.xpath("//li[@class='good_item']//pre//text()").getall()
        if good:
            answers.append(''.join(good))
        for node in response.xpath(
                "//li[@class='other_item']/div[@class='list-text-con new-pre-answer-text']"):
            text = ''.join(node.xpath(".//pre/text()").getall())
            if text:
                answers.append(text)
        return str(answers) if answers else ''

    def _detail_request(self, title, href, level):
        """Build a detail-page request for a related-question link."""
        url = self.BASE_URL + href
        item = dict(ask_title=title, from_url=url)
        return scrapy.Request(
            url=url,
            callback=self.parse1,
            meta={"item": item, "level": level},
        )

    def _related_requests(self, response, level):
        """Yield requests for similar-question and knowledge links.

        Two "similar question" page layouts exist; each list row styles its
        first entry differently from the rest, hence the ``num == 1`` split.
        """
        similar_hdr = response.xpath(
            "//div[@class='similar_tab mt10']/div[1]/h2/text()").get()
        similar_hdr2 = response.xpath(
            "//div[@class='detail-answer-item detail-floor-item mt10'][1]/div[1]/h2/text()").get()
        if similar_hdr == '类似问题':
            self.logger.debug("有类似问题>>>>>>>>>>>>>>>>1")
            for row in response.xpath("//ul[@id='similar_list_hyih']/li"):
                num = row.xpath("./em/text()").get()
                if not num:
                    continue
                if int(num) == 1:
                    title = row.xpath("./dl/dt/a/@title").get(default='')
                    href = row.xpath("./dl/dt/a/@href").get()
                else:
                    title = row.xpath("./span/a/@title").get(default='')
                    href = row.xpath("./span/a/@href").get()
                if href:
                    yield self._detail_request(title, href, level)
        elif similar_hdr2 == '类似问题':
            self.logger.debug("有类似问题>>>>>>>>>>>>>>>>2")
            for row in response.xpath("//ul[@id='similar_list_hyih']/li"):
                num = row.xpath("./span[@class='num num-red']/text()").get()
                if not num:
                    continue
                if int(num) == 1:
                    title = row.xpath("./div/p[@class='ask-title']/a/@title").get(default='')
                    href = row.xpath("./div/p[@class='ask-title']/a/@href").get()
                else:
                    title = row.xpath("./a/@title").get(default='')
                    href = row.xpath("./a/@href").get()
                if href:
                    yield self._detail_request(title, href, level)
        # Related "insurance knowledge" links.
        knowledge_hdr = response.xpath(
            "//div[@class='similar_tab mt10']/div[1]/ul/li/h2/text()").get()
        if knowledge_hdr == '保险相关知识':
            self.logger.debug("有保险相关知识>>>>>>>>>>>>>>>>")
            for row in response.xpath(
                    "//div[@class='similar-item current']/ul[@class='similar_list']/li"):
                if row.xpath("./em/text()").get():
                    title = row.xpath("./dl/dt/a/@title").get(default='')
                    href = row.xpath("./dl/dt/a/@href").get()
                else:
                    title = row.xpath("./span/a/@title").get(default='')
                    href = row.xpath("./span/a/@href").get()
                if href:
                    yield self._detail_request(title, href, level)




