# -*- coding: utf-8 -*-
from urllib.parse import urlencode

import scrapy

from biquyun.items import BiquyunItem


class SearchSpider(scrapy.Spider):
    """Search spider for www.biquyun.com.

    Run as: ``scrapy crawl search -a searchkey=<keyword>``.
    Searches the site, yields one BiquyunItem per search hit, and follows
    a novel's chapter-index page down to the individual chapter pages.
    """

    name = 'search'
    allowed_domains = ['www.biquyun.com']

    def start_requests(self):
        """Issue the initial search request.

        The search keyword is supplied via the ``-a searchkey=...`` spider
        argument; when absent, the bare endpoint is requested.
        """
        url = 'http://www.biquyun.com/modules/article/soshu.php'
        searchkey = getattr(self, 'searchkey', None)
        if searchkey is not None:
            # The site serves GBK, so the query parameter must be
            # GBK-encoded; urlencode also percent-escapes characters
            # (spaces, CJK text) that would otherwise break the raw URL.
            url = url + '?' + urlencode({'searchkey': searchkey},
                                        encoding='gbk')

        # Pass the response encoding per-request. The previous code set
        # ``scrapy.Request.encoding = 'GBK'``, which clobbered the shared
        # class attribute for every request in the process.
        yield scrapy.Request(url, callback=self.parse, encoding='GBK')

    def parse(self, response):
        """Dispatch on the response URL: result list vs. novel index."""
        self.log("=======================================")
        self.log(response.url)
        # Bug fix: the endpoint is 'soshu.php'; the old check looked for
        # the misspelled 'souhu.php', so the search-result branch was
        # unreachable and every response was parsed as an index page.
        if 'soshu.php' in response.url:
            yield from self.parse_search_result(response)
        else:
            # A single-hit search redirects straight to the novel's
            # index page, whose URL no longer contains 'soshu.php'.
            yield from self.parse_article_index(response)

    def parse_search_result(self, response):
        """Parse the search-result table, yielding one BiquyunItem per row."""
        # Slice off the header row instead of mutating the SelectorList
        # with ``del items[0]``.
        for result_row in response.css("#content .grid tr")[1:]:
            cells = result_row.css("td")
            if len(cells) < 5:
                # Defensive: skip malformed rows rather than raise IndexError.
                continue
            item = BiquyunItem()
            item["title"] = cells[0].css("::text").extract_first()
            item["url"] = cells[0].css("a ::attr(href)").extract_first()
            item["lastArticle"] = cells[1].css("::text").extract_first()
            item["author"] = cells[2].css("::text").extract_first()
            item["lastUpdatedTime"] = cells[4].css("::text").extract_first()
            yield item

    def parse_article_index(self, response):
        """Parse a novel's chapter-index page and follow every chapter link."""
        for link in response.css("#list dd a"):
            href = link.css("::attr(href)").extract_first()
            yield response.follow(href, callback=self.parse_article_content)

    def parse_article_content(self, response):
        """Log the chapter page title (content extraction not implemented yet)."""
        self.log(response.css("title::text").extract_first())
