# -*- coding: utf-8 -*-
from datetime import datetime

import scrapy
from scrapy.crawler import CrawlerProcess

from buddha.items import NewsItem


class FoxueSpider(scrapy.Spider):
    """Crawl Buddhist news articles from fjyw.foxue.org.

    Crawl flow:
        front page --parse--> category pages --parse (with 'cate' in meta)-->
        paginated article lists --parse_page--> article details
        --parse_detail--> NewsItem
    """

    name = "foxue"
    allowed_domains = ["foxue.org"]
    start_urls = (
        "http://fjyw.foxue.org/",
    )

    def parse(self, response):
        """Parse the front page and first-level category pages.

        On the front page (no ``cate`` in ``response.meta``) each ``box02``
        block links to a category page, which is re-parsed by this same
        method with the category name carried in meta.  On a category page
        the boxes are sub-categories: the title link leads to a paginated
        article list (``parse_page``) and the teaser links below it go
        straight to article details (``parse_detail``).
        """
        for box in response.css("div[class='box02']"):
            title_link = box.css("div[class='tit']>a[class='t1']")

            href = title_link.css("::attr(href)").extract_first()
            cate = title_link.css("::text").extract_first()
            if not href:
                # Robustness: skip boxes without a title link instead of
                # letting scrapy.Request raise ValueError on a None URL.
                continue
            # Tolerate relative links; scrapy.Request requires absolute URLs.
            href = response.urljoin(href)

            if "cate" in response.meta:  # idiomatic membership test
                # Second level: build a "parent>child" category path.
                cate = ">".join([response.meta["cate"], cate or ""])
                yield scrapy.Request(href, callback=self.parse_page, meta={"cate": cate})
                for sub_href in box.css("div[class='Subshow']>ul>li>a::attr(href)").extract():
                    yield scrapy.Request(response.urljoin(sub_href),
                                         self.parse_detail, meta={"cate": cate})
            else:
                # First level: recurse into the category page.
                yield scrapy.Request(href, callback=self.parse, meta={"cate": cate})

    def parse_page(self, response):
        """Parse one page of an article list.

        Follows every article link on the page, then the pager link to
        continue through subsequent pages.
        """
        for href in response.css("#ahtcy_list_right>dd>a::attr(href)").extract():
            yield scrapy.Request(response.urljoin(href),
                                 self.parse_detail, meta={"cate": response.meta["cate"]})

        # BUG FIX: CSS :nth-last-child() is 1-based, so the original
        # :nth-last-child(0) could never match and pagination was never
        # followed.  :nth-last-child(1) selects the last pager link
        # (presumably the "next/last page" anchor — verify against the
        # live markup).
        next_href = response.css("#pages>a:nth-last-child(1)::attr(href)").extract_first()
        if next_href:
            yield scrapy.Request(response.urljoin(next_href),
                                 self.parse_page, meta={"cate": response.meta["cate"]})

    def parse_detail(self, response):
        """Extract a single article page into a NewsItem."""
        news = NewsItem()
        news["title"] = response.css("#Article>h2::text").extract_first()
        # content is a list of paragraph text fragments, joined downstream.
        news["content"] = response.css("#Article>div[class='ahtcy_content']>p::text").extract()
        # Crawl timestamp; kept naive/local for pipeline compatibility.
        news["ctime"] = datetime.now()
        news["url"] = response.url
        news["page"] = 1
        news["pagenum"] = 1
        news["pic"] = ""
        yield news


def main():
    """Run the foxue spider as a standalone script."""
    process = CrawlerProcess()
    process.crawl(FoxueSpider)
    process.start()


if __name__ == "__main__":
    main()
