from urllib import request
import json
import time
from bs4 import BeautifulSoup  # Beautiful Soup: a Python library for extracting structured data from HTML/XML
import adwin.baseinfo as baseinfo
import adwin.chapter as chapterApp
import adwin.mysqlDemo as mysqlDemo

def getAllChapterId(cartoonId, totalPage):
    """Collect chapter records for a cartoon that are newer than what the
    database already holds.

    Args:
        cartoonId: site id of the cartoon.
        totalPage: number of chapter-list pages to scan.

    Returns:
        list[dict]: one dict per new chapter with keys
        ``bid``/``num``/``content``/``title``/``chapter_id``.
    """
    chapterUrls = []
    # Highest chapter number already persisted for this cartoon;
    # None when nothing has been stored yet.
    maxChapterIndex = mysqlDemo.getChapterMaxNumById(cartoonId)[0]
    for pageNum in range(1, totalPage + 1):
        soup = baseinfo.getUrlContent(baseinfo.cartoonBaseByCartoonId % (cartoonId, pageNum))
        for chapter in soup.find_all('li', {"class": "chapter-item"}):
            # Hoist the repeated sub-queries (the original re-ran
            # find('span') / find('a') several times per chapter).
            num = chapter.find('span').text
            link = chapter.find('a')
            # Skip chapters that are already in the database
            # ("is not None" rather than "!= None").
            if maxChapterIndex is not None and int(maxChapterIndex) >= int(num):
                continue
            chapterUrls.append({
                'bid': cartoonId,
                'num': num,
                'content': chapterApp.getChapterContentByUrl(link['href']),
                'title': link.text,
                'chapter_id': chapter.find('input', {"name": "id[]"})['value'],
            })
    return chapterUrls

def getCartoonInfoByCartoonId(cartoonId):
    """Scrape a cartoon's full metadata, including all of its new chapters.

    Args:
        cartoonId: site id of the cartoon.

    Returns:
        dict: cartoon record ready for insertion, with a ``chapters`` key
        holding the list produced by :func:`getAllChapterId`.
    """
    soup = baseinfo.getUrlContent(baseinfo.cartoonBaseByCartoonId % (cartoonId, 1))

    title = soup.find('h4').text
    # Cover image: first element after the col-sm-3 column (via .next.next);
    # computed once instead of three times as in the original.
    litpic = soup.find('div', class_='col-sm-3').next.next['src']
    today = time.strftime('%Y.%m.%d', time.localtime(time.time()))

    cartoon = {
        'bid': cartoonId,
        'catid': '1',            # fixed category
        'click': 10000,          # seeded click count
        'zan': 10000,            # seeded like count
        'bookname': title,
        'author': 'admin',       # placeholder author
        'litpic': litpic,
        'litpicd': litpic,
        'contentlitpic': litpic,
        'addtime': today,
        'create_time': today,
        'description': soup.find_all('div', class_='text-muted')[1].text.strip(),
        'title': title,
    }
    pagination = soup.find('ul', class_='pagination')
    # The pagination bar includes prev/next items, hence the -2.
    # "is None" and len() replace "== None" and .__len__().
    pageNums = 1 if pagination is None else len(pagination.find_all('li')) - 2
    cartoon['chapters'] = getAllChapterId(cartoonId, pageNums)
    return cartoon

# NOTE(review): stale comment — "get a book's total chapter pages from a soup
# object"; no such function follows. Page counting happens inline in
# getCartoonInfoByCartoonId. TODO: remove or relocate.
# ******************************************************************************************
# Get the total number of pages of the cartoon index listing
def getAllCartoonPages():
    """Return the total page count of the cartoon index listing."""
    index_soup = baseinfo.getUrlContent(baseinfo.cartoonIndex)
    return baseinfo.getTotalPages(index_soup)

# Fetch a cartoon's metadata (title, images, description, page count) by id
def getCartoonDetailByCartoonIdFromWeb(cartoonId):
    """Scrape a cartoon's metadata from its detail page (no chapter bodies).

    Unlike :func:`getCartoonInfoByCartoonId` this only records the number of
    chapter-list pages (``chapterPages``) instead of downloading chapters.

    Args:
        cartoonId: site id of the cartoon.

    Returns:
        dict: cartoon record with the same metadata keys plus ``chapterPages``.
    """
    soup = baseinfo.getUrlContent(baseinfo.cartoonBaseByCartoonId % (cartoonId, 1))

    title = soup.find('h4').text
    # Cover image: first element after the col-sm-3 column (via .next.next);
    # computed once instead of three times as in the original.
    litpic = soup.find('div', class_='col-sm-3').next.next['src']
    today = time.strftime('%Y.%m.%d', time.localtime(time.time()))

    return {
        'bid': cartoonId,
        'catid': '1',            # fixed category
        'click': 10000,          # seeded click count
        'zan': 10000,            # seeded like count
        'bookname': title,
        'author': 'admin',       # placeholder author
        'litpic': litpic,
        'litpicd': litpic,
        'contentlitpic': litpic,
        'addtime': today,
        'create_time': today,
        'description': soup.find_all('div', class_='text-muted')[1].text.strip(),
        'title': title,
        'chapterPages': baseinfo.getTotalPages(soup),
    }

# Query a book record
def getCartoonByCartoonId(cartoonId):
    """Look up a cartoon in the database by id.

    Bug fix: the original discarded the query result (implicitly returning
    None), making the lookup useless to callers; the value is now returned.
    Existing callers that ignored the return value are unaffected.
    """
    return mysqlDemo.getCartoonById(cartoonId)

# Insert a new book record
def addNewCartoon(cartoon):
    """Persist a new cartoon record (a dict as built by the scrape helpers)."""
    mysqlDemo.insertCartoon(cartoon)
