import scrapy
from novel.items import BiQuGeCategoryItem
from novel.items import BiQuGeNovelItem
from novel.items import BiQuGeNovelChapterItem
from urllib.parse import urlparse


class biqugeScrapy(scrapy.Spider):
    """Spider for www.biquge.com.tw.

    Crawl order: home page categories -> category listing pages ->
    novel detail pages -> chapter pages. Also follows "recommended
    novel" links found on detail and chapter pages.
    """

    name = 'biquge'
    start_urls = ['http://www.biquge.com.tw']

    # URLs of novel detail pages already scraped in this run.
    # Kept as a list for backward compatibility; `_seen_novel_urls`
    # mirrors it as a set for O(1) membership tests.
    novel_url_list = []
    _seen_novel_urls = set()

    def parse(self, response):
        """Parse the home page: yield one category item and one request
        per category link in the top navigation.

        The first nav entry is skipped (it is the home link, not a
        category).
        """
        category_names = response.xpath(
            '//*[@id="wrapper"]/div[2]/ul/li/a/text()').extract()
        category_urls = response.xpath(
            '//*[@id="wrapper"]/div[2]/ul/li/a/@href').extract()

        for i, cat_name in enumerate(category_names):
            if i == 0:
                continue  # first entry is the home link, not a category
            item = BiQuGeCategoryItem()
            item["source"] = self.name
            item["categoryName"] = cat_name.replace('小说', '')
            item["categoryUrl"] = response.urljoin(category_urls[i])
            yield item
            yield scrapy.Request(url=item["categoryUrl"],
                                 callback=self.parse_category_novel)

    def parse_category_novel(self, response):
        """Parse a category listing page and follow every novel link
        (both the "new" and "hot" columns)."""
        novel_urls = response.xpath(
            '//*[@id="newscontent"]/div[1]/ul/li/span[1]/a/@href').extract()
        novel_urls += response.xpath(
            '//*[@id="newscontent"]/div[2]/ul/li/span[1]/a/@href').extract()

        for href in novel_urls:
            # urljoin keeps this correct even for relative hrefs.
            yield scrapy.Request(url=response.urljoin(href),
                                 callback=self.parse_novel_detail)

    def parse_novel_detail(self, response):
        """Parse a novel detail page.

        Yields the novel item, then follows recommended novels and all
        chapter links. Pages already seen in this run are skipped.
        """
        # In-memory dedup: skip detail pages we have already scraped.
        if response.url in self._seen_novel_urls:
            return
        self._seen_novel_urls.add(response.url)
        self.novel_url_list.append(response.url)

        # Category name comes from the breadcrumb text, e.g.
        # "... > 玄幻小说 > ..." -> "玄幻".
        categoryName = response.xpath(
            '//*[@id="wrapper"]/div[3]/div[1]/text()').extract()[2].split(
            '>')[1].replace(' ', '').replace('小说', '')

        novel = BiQuGeNovelItem()
        novel["source"] = self.name
        novel["categoryName"] = categoryName
        novel["url"] = response.url
        novel["name"] = response.xpath(
            '//*[@id="info"]/h1/text()').extract()[0]
        # Strip the "作    者：" (author:) label; \xa0 are non-breaking spaces.
        novel["author"] = response.xpath(
            '//*[@id="info"]/p[1]/text()').extract()[0].replace('作\xa0\xa0\xa0\xa0者：', '')
        # Strip the "最后更新：" (last updated:) label.
        novel["updateDate"] = response.xpath(
            '//*[@id="info"]/p[3]/text()').extract()[0].replace('最后更新：', '')
        novel["lastChapter"] = response.xpath(
            '//*[@id="info"]/p[4]/a/text()').extract()[0]
        # BUGFIX: was `response.url + href`, which produces malformed URLs
        # for root-relative hrefs (duplicated path segments). urljoin
        # resolves the href against the page URL correctly.
        novel["lastChapterUrl"] = response.urljoin(response.xpath(
            '//*[@id="info"]/p[4]/a/@href').extract()[0])
        novel["introduce"] = response.xpath(
            '//*[@id="intro"]/p/text()').extract()[0]
        # BUGFIX: was `start_urls[0] + src`; urljoin also handles absolute
        # and scheme-relative image sources.
        novel["img"] = response.urljoin(response.xpath(
            '//*[@id="fmimg"]/img/@src').extract()[0])
        yield novel

        # Recommended novels listed in the page footer.
        for href in response.xpath(
                '//*[@id="footer"]/div[1]/a/@href').extract():
            yield scrapy.Request(url=response.urljoin(href),
                                 callback=self.parse_novel_detail)

        # Chapter links; `sort` preserves the chapter's position in the
        # table of contents so order can be reconstructed downstream.
        chapter_hrefs = response.xpath(
            '//*[@id="list"]/dl/dd/a/@href').extract()
        for i, href in enumerate(chapter_hrefs):
            yield scrapy.Request(url=response.urljoin(href),
                                 meta={'sort': i},
                                 callback=self.parse_novel_chapter)

    def parse_novel_chapter(self, response):
        """Parse a chapter page: follow the recommended novels shown next
        to the text, then yield the chapter content item."""
        # Recommended novels displayed alongside the chapter.
        for href in response.xpath(
                '//*[@id="wrapper"]/div[4]/div/div[2]/div[2]/a/@href').extract():
            yield scrapy.Request(url=response.urljoin(href),
                                 callback=self.parse_novel_detail)

        # Same breadcrumb format as the novel detail page.
        categoryName = response.xpath(
            '//*[@id="wrapper"]/div[3]/div/div[1]/text()').extract()[2].split(
            '>')[1].replace(' ', '').replace('小说', '')

        chapter = BiQuGeNovelChapterItem()
        chapter['source'] = self.name
        chapter['sort'] = response.meta["sort"]
        chapter['categoryName'] = categoryName
        chapter['novelName'] = response.xpath(
            '//*[@id="wrapper"]/div[3]/div/div[1]/a[2]/text()').extract()[0]
        chapter['name'] = response.xpath(
            '//*[@id="wrapper"]/div[3]/div/div[2]/h1/text()').extract()[0].replace(' ', '')
        chapter['url'] = response.url
        # The chapter body is a sequence of text nodes; join into one string.
        chapter['content'] = ','.join(response.xpath(
            '//*[@id="content"]/text()').extract())
        yield chapter
