# -*- coding: utf-8 -*-
import scrapy

from biquge.items import BiqugeItem
from scrapy.signals import spider_closed, spider_opened


class XiaoshuoSpider(scrapy.Spider):
    """Crawl the full novel catalogue on xbiquge.la.

    ``parse`` walks the catalogue page and emits one request per novel;
    ``parse_detail`` fills in the category and author on each item.
    On ``spider_closed`` the accumulated ``./data.csv`` (written elsewhere,
    presumably by a pipeline — confirm) is uploaded to HDFS.
    """

    name = 'xiaoshuo'
    allowed_domains = ['xbiquge.la']
    start_urls = ['http://www.xbiquge.la/xiaoshuodaquan/']

    def __init__(self, start_url=None, db='console', *args, **kwargs):
        # Initialize the base Spider first so its attributes (name, logger,
        # start_urls handling) exist before we add our own state.
        super().__init__(*args, **kwargs)
        self.start_url = start_url  # optional override URL (not read in this file)
        self.db = db                # output-target label; defaults to 'console'

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Build the spider via the stock Scrapy path and hook our signals.

        Using ``super().from_crawler`` instead of ``cls()`` + the private
        ``_set_crawler`` keeps Scrapy's default wiring (settings binding,
        ``spider_closed -> spider.close``) intact.
        """
        spider = super().from_crawler(crawler, *args, **kwargs)
        # Connect our lifecycle handlers to the crawler's signal bus.
        crawler.signals.connect(spider.open_spider, signal=spider_opened)
        crawler.signals.connect(spider.close_spider, signal=spider_closed)
        return spider

    def open_spider(self, spider):
        """On ``spider_opened``: create the HDFS client used at close time."""
        # Imported lazily so the module stays importable without the hdfs package.
        import hdfs
        self.hdfs = hdfs.Client("http://192.168.1.100:50070")

    def close_spider(self, spider):
        """On ``spider_closed``: upload the crawl's CSV output to HDFS.

        Best-effort: if no ``data.csv`` was produced (e.g. an empty crawl),
        log a warning instead of raising during shutdown.
        """
        try:
            with open("./data.csv", "r", encoding="utf-8") as f:
                data = f.read()
        except FileNotFoundError:
            self.logger.warning("./data.csv not found; skipping HDFS upload")
            return
        self.hdfs.write("/data/data1.csv", data=data, encoding="utf-8")

    def parse(self, response):
        """Parse the catalogue page: one item + detail request per novel link."""
        for anchor in response.xpath("//div[@class='novellist']//a"):
            item = BiqugeItem()
            item["novel_name"] = anchor.xpath("./text()").extract_first()
            item["novel_url"] = anchor.xpath("./@href").extract_first()
            # Guard: scrapy.Request raises ValueError on a None/empty URL.
            if not item["novel_url"]:
                continue
            # Crawl the detail page; pass the partially-filled item along.
            yield scrapy.Request(
                item["novel_url"],
                callback=self.parse_detail,
                meta={"item": item}
            )

    def parse_detail(self, response):
        """Fill in category and author on the item carried in ``response.meta``."""
        item = response.meta["item"]
        item["novel_kind"] = response.xpath("//div[@class='con_top']/a[2]/text()").extract_first()
        author = response.xpath("//div[@id='info']//p[1]/text()").extract_first()
        # The first 7 characters are a fixed label prefix on this site;
        # guard against a missing node (extract_first() returns None).
        item["novel_author"] = author[7:] if author else None
        yield item
