import re

import requests
from lxml import etree
from scrapy import Request
from scrapy.spiders import Spider
from scrapy.selector import Selector

from xiaoshuo.items import XiaoshuoItem, InfoItem


class BiQuGe(Spider):
    """Spider for biquge5.com: category listings -> book pages -> chapter text.

    Yields two item types:
      * ``InfoItem``     — one per book (metadata).
      * ``XiaoshuoItem`` — one per chapter (full text, continuation pages merged).
    """

    name = 'biquge'

    # Site category ids: 1玄幻, 2修真, 3都市, 4穿越, 5网游, 6科幻

    def start_requests(self):
        """Yield listing-page requests: 6 categories x pages 1-9 each."""
        for book_type in range(1, 7):
            for page in range(1, 10):
                url = f'https://www.biquge5.com/shuku/{book_type}/allvisit-0-{page}.html'
                yield Request(url, callback=self.title_parse)

    def title_parse(self, response):
        """Extract every book detail-page link from a category listing page."""
        selector = Selector(response)

        urls = selector.xpath('/html/body/div[2]/div[1]/ul[4]/li/div/div[1]/a/@href').extract()
        for url in urls:
            # urljoin keeps absolute hrefs unchanged and repairs relative ones.
            yield Request(response.urljoin(url), callback=self.book_parse)

    def book_parse(self, response):
        """Scrape book metadata into one InfoItem, then follow all chapter links."""
        selector = Selector(response)
        item = InfoItem()
        item['new_chapter'] = selector.xpath('//*[@id="info"]/p[4]/a/text()').extract_first()
        item['url'] = selector.xpath('//*[@id="info"]/p[4]/a/@href').extract_first()
        # The site renders the author line as "作者:NAME"; guard against a
        # missing node or a missing ':' instead of raising.
        # NOTE(review): some pages may use the full-width '：' — confirm.
        author = selector.xpath('//*[@id="info"]/p[1]/text()').extract_first()
        item['author'] = author.split(':')[1] if author and ':' in author else ''
        info = selector.xpath('//*[@id="intro"]/p/text()').extract_first()
        if info:
            item['info'] = re.sub('["\']', '', info)
        else:
            item['info'] = ''
        item['book_name'] = selector.xpath('//*[@id="info"]/h1/text()').extract_first()
        item['book_type'] = selector.xpath('//*[@class="con_top"]/a[2]/text()').extract_first()
        # Keep only the date/time portion of the "last updated" line; tolerate
        # a missing node or a non-matching format (was an unguarded [0]).
        endtime = selector.xpath('//*[@id="info"]/p[3]/text()').extract_first() or ''
        matches = re.findall(r'.*?([0-9:\s-]+)', endtime)
        item['endtime'] = matches[0] if matches else ''
        item['status'] = selector.xpath('//*[@id="info"]/p[2]/text()').extract_first()
        item['img'] = selector.xpath('//*[@id="fmimg"]/img/@src').extract_first()

        # Yield the book metadata exactly once (the old code re-yielded the
        # same item inside the chapter loop, producing duplicates).
        yield item

        # Follow every chapter link under #list (the old code took only the
        # first <li> of each <ul> and could pass url=None to Request).
        for chapter_url in selector.xpath('//*[@id="list"]/ul/li/a/@href').extract():
            yield Request(url=response.urljoin(chapter_url), callback=self.content_parse)

    def content_parse(self, response):
        """Scrape one chapter (plus its _2/_3 continuation pages) into a XiaoshuoItem."""
        sel = Selector(response)
        item = XiaoshuoItem()
        item['book_type'] = sel.xpath('//*[@class="con_top"]/a[2]/text()').extract_first()
        item['book_name'] = sel.xpath('//*[@class="con_top"]/a[3]/text()').extract_first()
        item['chapter'] = sel.xpath('//*[@class="bookname"]/h1/text()').extract_first()

        # Collect text fragments with all whitespace stripped; join once at
        # the end instead of quadratic `+=` concatenation.
        parts = [re.sub(r'\s', '', piece)
                 for piece in sel.xpath('//*[@id="content"]/text()').extract()]

        # Long chapters continue on "<base>_2.html" / "<base>_3.html".
        # The old code concatenated the literal character 'i' (never the page
        # number) and kept mutating `url`, so it never hit a real page.
        base = response.url
        for page_no in range(2, 4):
            page_url = f'{base[:-5]}_{page_no}{base[-5:]}'
            # NOTE(review): a blocking requests.get inside a Scrapy callback
            # stalls the reactor; consider yielding a scrapy Request instead.
            html = etree.HTML(requests.get(page_url, timeout=10).text)
            extra = html.xpath('//*[@id="content"]/text()')
            # Bug fix: `content is []` compared identity with a fresh list
            # literal and was always False — the break never fired.
            if not extra:
                break
            parts.extend(re.sub(r'\s', '', piece) for piece in extra)

        # Assign after the loop so page-1 text is kept even when no
        # continuation pages exist (the old code only set it inside the loop).
        item['content'] = ''.join(parts)
        yield item



