# -*- coding: utf-8 -*-
import scrapy
from scrapy import Selector, Request
from scrapy.signals import spider_closed, spider_opened, item_scraped
from xiaohua.items import XiaohuaItem
from scrapy.http import FormRequest

class DuanziSpider(scrapy.Spider):
    """Scrape jokes ("duanzi") from xiaohua.com.

    Lifecycle (wired via Scrapy signals in :meth:`from_crawler`):
      * ``spider_opened``  -> :meth:`open_spider`  — connect an HDFS client.
      * ``spider_closed``  -> :meth:`close_spider` — upload the locally
        exported ``data.csv`` feed to HDFS.
    """

    name = 'duanzi'
    allowed_domains = ['xiaohua.com']
    start_urls = ['https://www.xiaohua.com/duanzi/']

    def __init__(self, start_url=None, db='console', *args, **kwargs):
        # Call the base initializer first so it cannot clobber the
        # attributes we set below.
        super().__init__(*args, **kwargs)
        # start_url: optional single-URL override (currently unused — a
        # custom start_requests would consume it); db: output target name.
        self.start_url = start_url
        self.db = db

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Build the spider and connect open/close signal handlers.

        Uses ``super().from_crawler`` rather than constructing the spider
        by hand with the private ``_set_crawler``: the base implementation
        also applies crawler settings to the spider.
        """
        spider = super().from_crawler(crawler, *args, **kwargs)
        # Connect handler methods to their lifecycle signals.
        crawler.signals.connect(spider.close_spider, signal=spider_closed)
        crawler.signals.connect(spider.open_spider, signal=spider_opened)
        return spider

    def open_spider(self, spider):
        """spider_opened handler: create the HDFS (WebHDFS) client."""
        # Local import keeps the optional hdfs dependency off module load.
        import hdfs
        self.hdfs = hdfs.Client("http://gesukj.com:50070")

    def close_spider(self, spider):
        """spider_closed handler: stream the exported CSV up to HDFS.

        ``hdfs.Client.write`` accepts a file-like object as ``data``, so the
        local file is streamed rather than read fully into memory.
        """
        with open("./data.csv", "r") as fp:
            self.hdfs.write("/data/data.csv", data=fp, encoding="utf-8")

    def get_data_item(self, signal, sender, item, response, spider):
        # Placeholder for an item_scraped signal handler (not connected).
        pass

    def parse(self, response, my_name=None, **kwargs):
        """Yield one ``XiaohuaItem`` per joke on the listing page.

        ``extract_first()`` is used instead of ``extract()[0]`` so that a
        single malformed entry yields ``None`` fields instead of raising
        IndexError and aborting the whole page callback.
        """
        for entry in response.xpath("//div[@class='one-cont']"):
            model = XiaohuaItem()
            model['content'] = entry.xpath("./p/a/text()").extract_first()
            model['author'] = entry.xpath("div/div/a/i/text()").extract_first()
            yield model