#! -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
from furl import furl

from Data import models, databaseApi

# Shared database handle bound to the project's engine (see Data package).
db = databaseApi.API(models.engine)

# Scraper target sites keyed by site name.  NOTE(review): only the
# '顶点小说' (booktxt.net) entry is referenced in the visible code.
urls = {
    '八零电子书' : r'https://www.80txt.com',
    '顶点小说' : r'https://www.booktxt.net',
    '海岸文学' : r'https://www.haxshu.com',
    '飞速中文网': r'http://www.feizw.com/'
}
# Desktop-Chrome User-Agent sent with every request so the sites serve
# their normal (non-bot) pages.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
} 



def exit_book(name, author):
    """Return True when a book with this name/author already exists in the DB."""
    matches = db.query(models.Book, {'name': name, 'author': author})
    return len(matches) > 0

def addbook(book):
    """Insert one book record into the DB; return the insert result."""
    result, _err = db.addone(models.Book, book)
    return result
def get_book(book):
    """Look a book up by the given fields and return its Id, or None if absent."""
    record = db.query(models.Book, book, True)
    return record.Id if record else None

class Booktxt(object):
    """Scraper for the 顶点小说 (booktxt.net / booktxt.com) novel site.

    Every method fetches a GBK-encoded page with the module-level
    ``headers`` and parses it with BeautifulSoup's html.parser.
    """

    def __init__(self):
        # Base URL of the site, taken from the module-level ``urls`` table.
        self.fr = furl(urls['顶点小说'])

    @staticmethod
    def _fetch_soup(url):
        """Fetch *url*, decode it as GBK and return a BeautifulSoup tree."""
        res = requests.get(str(url), headers=headers)
        res.encoding = 'gbk'
        return BeautifulSoup(res.text, 'html.parser')

    @staticmethod
    def _strip_tag_suffix(text):
        """Remove a literal stray tag fragment from the end of *text*.

        The previous ``rstrip('div>').rstrip('/br>')`` treated the argument
        as a character set, so real text ending in e.g. 'd' or 'r' lost
        characters; this strips whole suffixes only.
        """
        for suffix in ('</div>', 'div>', '<br/>', '/br>'):
            if text.endswith(suffix):
                text = text[:-len(suffix)]
        return text

    def getbook(self, url, isread=True):
        """Scrape a book's detail page.

        :param url: book detail-page URL
        :param isread: True  -> also collect the chapter list;
                       False -> return a dict shaped for the Book DB model
        :return: dict of book metadata (shape depends on *isread*)
        """
        fr = furl(url)
        soup = self._fetch_soup(fr)

        # Author / title / description from the info panel.
        author = soup.select('.box_con #maininfo #info p')[0].text.split('：')[1]
        name = soup.select('.box_con #maininfo #info h1')[0].string
        desc = soup.select('.box_con #maininfo #intro p')[0].text

        down_url = ''
        booktype = ''
        if 'booktxt.com' in fr.url:
            # The .com mirror exposes no full-book download link, but the
            # breadcrumb carries the book category.
            down_url = None
            booktype = soup.select('.con_top a')[1].text
        else:
            down_url = soup.select('.box_con #maininfo #info p font[color]')[0].a['href']

        if not isread:
            # Caller only wants DB-model fields, not the chapter list.
            return {
                'name': name,
                'author': author,
                'showUrl': fr.url,
                'describe': desc,
                'booktype': booktype,
                'downUrl': down_url,
                'wordNum': None,
                'state': None
            }

        # Chapter list: the page repeats "latest chapters" at the top, so
        # only start saving once the first real chapter is seen.
        secs = []
        issave = False
        for sec in soup.select('.box_con #list dl dd a'):
            # ``.string`` is None when the <a> contains nested markup;
            # fall back to get_text() so the membership tests cannot crash.
            title = sec.string or sec.get_text()
            if '第1章' in title or '第一章' in title or '楔子' in title:
                issave = True
            if issave:
                secs.append({
                    'href': sec['href'].split('/')[-1],
                    'title': title
                })
        return {
            'name': name,
            'author': author,
            'desc': desc,
            'down_url': down_url,
            'secs': secs
        }

    def get_sec_content(self, book_id, book_url):
        """Scrape one chapter page: title, body lines, prev/next links."""
        soup = self._fetch_soup(furl(book_url))

        title = soup.select('.content_read .box_con .bookname h1')[0].string
        cata = '/book/' + str(book_id)  # local catalogue URL of this book
        upsec = ''    # previous-chapter href
        downsec = ''  # next-chapter href
        for a in soup.select('.content_read .box_con .bookname .bottem1 a'):
            label = a.string or a.get_text()
            if '上一章' in label:
                upsec = a['href'].split('/')[-1]
                if upsec == 'index.html':
                    # First chapter: "previous" points back at the catalogue.
                    upsec = cata
            if '下一章' in label:
                downsec = a['href'].split('/')[-1]
                if downsec == 'index.html':
                    downsec = cata

        content = []
        for text in soup.select('#content')[0]:
            if str(text) == '<br/>':
                # Line breaks become empty paragraphs.
                content.append('')
            else:
                content.append(self._strip_tag_suffix(str(text)))

        return {
            'title': title,
            'upsec': upsec,
            'downsec': downsec,
            'cata': cata,
            'content': content
        }

    def get_com_recommend(self):
        """Scrape the "classic recommendations" panel on booktxt.com.

        Books missing from the local DB are scraped and inserted first;
        returns a list of ``get_book`` results (book Ids) for every entry.
        """
        fr = furl(r'http://www.booktxt.com/')
        soup = self._fetch_soup(fr)

        # Panel heading; kept so a layout change still fails loudly here.
        title = soup.select('#hotcontent .r h2')[0].text

        books = []
        for book in soup.select('#hotcontent .r ul li'):
            author = book.contents[2].string
            name = book.a.string
            url = book.a['href']
            if not exit_book(name, author):
                # Not cached yet: scrape the detail page and insert it.
                fr.path = url
                addbook(self.getbook(fr.url, isread=False))
            books.append(get_book({'name': name, 'author': author}))
        return books

    def down_book(self, book_url):
        """Build the downloadable full text of a book.

        :return: (book name, one string containing every chapter)
        """
        fr = furl(book_url)
        soup = self._fetch_soup(fr)

        name = soup.select('#info h1')[0].string
        book = '\n\t\t\t\t\t\t\t\t\t\t\t\t\t%s\n' % name
        # The book's path segment is loop-invariant: compute it once
        # (the old code rebuilt it per chapter and shadowed ``book_url``).
        base_path = str(fr.path).strip('/')
        for sec in soup.select('#list dd'):
            title = sec.string
            href = sec.a['href'].split('/')[-1]
            # Absolute chapter URL = site root + book path + chapter file.
            sr = furl(fr.url)
            sr.path = [base_path, href]
            content = self.get_str_content(sr)
            book = '%s\n%s\n%s' % (book, title, content)
            print(str(title), len(content))
        return name, book

    def get_str_content(self, sec_url):
        """Download one chapter and return its body as a single string."""
        soup = self._fetch_soup(sec_url)

        cts = soup.select('#content')[0]
        # <br/> nodes contribute a blank line; everything else its text.
        # Built with join instead of repeated '%' concatenation (O(n)).
        parts = ['' if '<br/>' in str(ct) else str(ct) for ct in cts]
        return ''.join('\n' + part for part in parts)







