#!/usr/bin/env python
# encoding: utf-8
# pylint: disable-all

import sys
from bookspider import BookSpider
from lxml import etree


class shengxu3(BookSpider):
  """Spider for novels hosted on www.sxu3.com.

  Workflow (driven by BookSpider): prepare() fetches the index page and
  collects the chapter URLs; fetch_section() follows one chapter's
  pagination; process_section() merges the pages into a single HTML blob.
  """

  def __init__(self, book_id):
    """book_id: numeric id of the book on sxu3.com (e.g. 16173)."""
    super().__init__()
    self.page_base = 'http://www.sxu3.com/book/'  # index-page URL prefix
    self.book_id = book_id
    self.name = ''          # book title, filled in by prepare()
    self.section_list = []  # chapter URLs, filled in by prepare()

  def prepare(self):
    """Download the index page; extract the book name and chapter URLs."""
    index_page = '%s%d.html' % (self.page_base, self.book_id)
    rsp = self.download_url(index_page)
    doc = etree.HTML(rsp.content)
    # The book title is the page's <h1> text.
    self.name = ''.join(doc.xpath("//h1/text()"))
    # Chapter links are site-relative hrefs; make them absolute.
    items = doc.xpath('//ul[@class="list-group list-charts"]//a/@href')
    self.section_list = ['http://www.sxu3.com' + str(item) for item in items]

  def book_name(self):
    """Return the book title discovered by prepare()."""
    return self.name

  def Referer(self):
    """Referer header value: the book's index page URL."""
    return '%s%d.html' % (self.page_base, self.book_id)

  def sections(self):
    """Return the list of chapter URLs discovered by prepare()."""
    return self.section_list

  def fetch_section(self, pos, url):
    """Fetch one chapter, following its in-site pagination.

    Grabs the first page, then loops: while the "to be continued" marker
    (<center class="red">) is present, follow the last pager link to the
    next page.  Returns the raw HTML of every fetched page as a list.
    """
    pages = []
    next_url = url
    while True:
      content = self.get_page(pos, next_url)
      pages.append(content)
      doc = etree.HTML(content)
      # <center class="red"> marks "chapter continues on the next page".
      if not doc.xpath('//center[@class="red"]'):
        break
      items = doc.xpath('//ul[@class="pager"]/li[last()]/a/@href')
      if not items:
        break
      candidate = 'http://www.sxu3.com' + str(items[0])
      if candidate == next_url:
        # Guard against a self-referencing pager link looping forever.
        break
      next_url = candidate
    return pages

  def process_single_page(self, page):
    """Parse one page's HTML; return {'title', 'body'} or None on failure.

    'body' is the list of matched content <div> lxml elements.
    """
    doc = etree.HTML(page)
    # item.text may be None for an empty heading; guard before joining.
    title = ''.join(item.text or ''
                    for item in doc.xpath("//div[@class='panel-heading']"))
    body_els = doc.xpath("//div[@class='panel-body content-body content-ext']")
    if not body_els:
      print('解析body失败')
      return None
    return {
      'title': title,
      'body': body_els
    }

  def process_section(self, pos, url, sections):
    """Merge a chapter's pages into one HTML blob.

    Returns {'title': str, 'body': bytes}, or None when any page fails to
    parse or `sections` is empty.
    """
    title = ''
    body = []
    for page in sections:
      page_obj = self.process_single_page(page)
      if not page_obj:
        return None
      title = page_obj['title']
      body.extend(page_obj['body'])
    if not body:
      # Empty chapter -- nothing to assemble (would IndexError below).
      return None
    # Build the <h1> as an element rather than via string interpolation:
    # titles containing '&' or '<' would break etree.XML("<h1>%s</h1>").
    title_el = etree.Element('h1')
    title_el.text = title
    body[0].insert(0, title_el)
    content = b''
    for element in body:
      content += etree.tostring(element, encoding="utf-8", pretty_print=True, method='html')
    # Strip the site's "to be continued" advertisement line.
    ad = '>>>本章未完，点击下一页继续阅读<<<'.encode('utf-8')
    content = content.replace(ad, b'')
    return {
      'title': title,
      'body': content
    }


def main():
  """Crawl book 16173 from sxu3.com and package it as an EPUB."""
  spider = shengxu3(16173)
  spider.crawl()
  spider.make_epub()


if __name__ == '__main__':
  main()
