# coding=utf8

import scrapy
import os
import urlparse

from scrapy.utils.project import get_project_settings
from biquge.items import Book

__author__ = "zouxiaoliang"


class BiqugeV1Spider(scrapy.Spider):
    """Crawl www.biquge.la: the full novel index, each book's chapter list,
    and every chapter's body text, writing results under WEBSITE_DIR.
    """

    name = "biquge-spider-v1"
    # allowed_domains must hold bare domain names, not URLs: Scrapy's
    # offsite middleware matches the request host against these strings,
    # so URL entries ("http://...") never match anything.
    allowed_domains = [
        "www.biquge.la",
    ]
    start_urls = [
        "http://www.biquge.la/xiaoshuodaquan/",
    ]
    main_url = "http://www.biquge.la/"

    # Output directory for everything this spider writes, from project settings.
    website_dir = get_project_settings()["WEBSITE_DIR"]

    def check_website_dir(self):
        """Create the output directory if it does not exist yet."""
        if not os.path.exists(self.website_dir):
            os.makedirs(self.website_dir)

    def parse(self, response):
        """Parse the "all novels" index page (start_urls).

        Records every book in website_menu.txt and yields one request per
        book page, handled by :meth:`parse_book`.

        :param response: response for the novel-index page
        :return: generator of scrapy.Request for each book
        """
        self.check_website_dir()
        selector = scrapy.Selector(response)
        book_nodes = selector.xpath('//*[@class="novellist"]/ul/li')
        # "with" guarantees the menu file is closed when the generator finishes
        # (the original handle was leaked).
        with open(os.path.join(self.website_dir, "website_menu.txt"), mode="wb") as website_menu:
            for node in book_nodes:
                title = node.xpath("a/text()").extract_first()
                href = node.xpath("a/@href").extract_first()
                if title is None or href is None:
                    # Malformed <li>: skip instead of crashing on None.encode.
                    self.logger.error("%s: incomplete book entry, skipped" % response.url)
                    continue

                item = Book()
                item["name"] = title.encode("utf8")
                item["url"] = urlparse.urljoin(self.main_url, href.encode("utf8"))

                # Trailing node text looks like "(status / author)" — split and
                # strip the parentheses; tolerate missing pieces.
                extra = node.xpath("text()").extract_first()
                parts = [x.strip() for x in (extra or "").encode("utf8").split("/")]
                item["status"] = parts[0].replace(")", "").replace("(", "") if parts else ""
                item["author"] = parts[1] if len(parts) > 1 else ""

                book_info = "%s: {\n    url: %s,\n    status:%s,\n    author:%s\n}\n" % (
                    item["name"],
                    item["url"],
                    item["status"],
                    item["author"]
                )
                self.logger.debug(book_info)
                website_menu.write(book_info)

                yield scrapy.Request(
                    url=item["url"],
                    callback=self.parse_book,
                    dont_filter=True
                )

    def parse_book(self, response):
        """Parse one book's page.

        1. Writes the chapter menu to a per-book text file.
        2. Yields one request per chapter, handled by :meth:`parse_chapter`.

        :param response: response for a book page
        :return: generator of scrapy.Request for each chapter
        """
        self.check_website_dir()
        selector = scrapy.Selector(response)
        chapter_nodes = selector.xpath('//*[@id="list"]/dl/dd')
        url = response.url
        menu_path = os.path.join(
            self.website_dir,
            "book_%s.txt" % str(url).replace("http://", "").replace("/", "_").replace(":", "_")
        )
        # "with" guarantees the per-book menu file is closed (it was leaked before).
        with open(menu_path, mode="wb") as book_menu:
            for node in chapter_nodes:
                chapter_title = node.xpath("a/text()").extract_first()
                chapter_href = node.xpath("a/@href").extract_first()

                if chapter_title is None:
                    self.logger.error('%s a.xpath("a/text()").extract_first() is None' % url)
                    continue
                if chapter_href is None:
                    self.logger.error('%s a.xpath("a/@href").extract_first() is None' % url)
                    continue

                # hrefs on this site are absolute paths ("/book/..."), so plain
                # string concatenation with the page URL produced broken links;
                # urljoin resolves them correctly.
                chapter_url = urlparse.urljoin(url, chapter_href)

                line = "{\n    parent_url: %s,\n    chapter: %s,\n    chapter_url: %s\n}\n" % (
                    url,
                    chapter_title,
                    chapter_url
                )
                book_menu.write(line.encode("utf8"))

                yield scrapy.Request(
                    url=chapter_url,
                    callback=self.parse_chapter,
                    dont_filter=True
                )

    def parse_chapter(self, response):
        """Parse one chapter page and save its text to "<chapter name>.txt".

        :param response: response for a chapter page
        :return: None (writes the chapter file as a side effect)
        """
        self.check_website_dir()
        self.logger.debug("parse url: %s" % response.url)

        selector = scrapy.Selector(response)

        chapter_name = selector.xpath('//*[@class="bookname"]/h1/text()').extract_first()
        if chapter_name is None:
            # Page layout changed or empty page: nothing to save.
            self.logger.error("%s: chapter title not found" % response.url)
            return
        chapter_name = chapter_name.strip()
        self.logger.debug("chapter :%s" % chapter_name)

        chapter_content = selector.xpath('//*[@id="content"]').extract_first()
        if chapter_content is None:
            self.logger.error("%s: chapter content not found" % response.url)
            return
        # Strip the site's boilerplate wrapper and turn double <br> into newlines.
        chapter_content = chapter_content.replace(
            "<br><br>",
            "\n"
        ).replace(
            '<div id="content"><script>readx();</script>',
            ""
        )

        # "/" in a title would be treated as a path separator by open();
        # replace it so the filename stays inside website_dir.
        safe_name = chapter_name.replace("/", "_")
        with open(os.path.join(self.website_dir, "%s.txt" % safe_name), mode="wb") as wh:
            wh.write(chapter_content.encode("utf8"))
