from urllib import request
import json
from bs4 import BeautifulSoup  # Beautiful Soup is a Python library for extracting structured data from HTML/XML documents
import adwin.baseinfo as baseinfo
import adwin.mysqlDemo as mysqlDemo

def getChapterContentByUrl(url):
    """Fetch the chapter page at *url* and return the ``src`` URLs of all
    images tagged with class ``img_src`` (the chapter's page images)."""
    soup = baseinfo.getUrlContent(url)
    return [img['src'] for img in soup.find_all('img', {"class": "img_src"})]


# Insert a new chapter (skipped when invalid or already present).
def addNewChapter(chapter):
    """Insert *chapter* (a dict carrying at least 'chapter_id') into the DB.

    Does nothing when the chapter is missing/invalid, and refuses to insert
    a duplicate of an already-stored chapter_id.
    """
    # BUG FIX: the original condition used `and`, which (a) raised a
    # TypeError when chapter is None (short-circuit still subscripted None)
    # and (b) let a chapter without a chapter_id slip past validation.
    # `or` + dict.get() validates both cases safely.
    if chapter is None or chapter.get('chapter_id') is None:
        print("漫画是空的")
        return
    if getChapterByChapterId(chapter['chapter_id']) is None:
        mysqlDemo.insertChapter(chapter)
    else:
        print("章节%s已经存在，不要重复插入" %(chapter['chapter_id']))

# Look up a stored chapter by its chapter id.
def getChapterByChapterId(chapterId):
    """Return the chapter row matching *chapterId*, or None when absent."""
    row = mysqlDemo.getChapterById(chapterId)
    return row

# Query the most recent chapter of a cartoon.
def getLastestChapter(cartoonId):
    """Return the latest chapter row for *cartoonId* (None when none exist)."""
    latest = mysqlDemo.getLatestChapter(cartoonId)
    return latest

# Query the num of the most recent chapter.
def getLastestNumOfChapter(cartoonId):
    """Return the `num` field of the latest stored chapter of *cartoonId*,
    or 0 when no chapter exists yet.

    NOTE(review): index 3 is presumably the `num` column of the DB row —
    confirm against the schema used by mysqlDemo.
    """
    chapter = getLastestChapter(cartoonId)
    # Idiom fixes: dropped stray semicolons, `== None` -> `is None`,
    # and collapsed the if/else into a conditional expression.
    return 0 if chapter is None else chapter[3]

# Collect every chapter whose num is greater than latestNum.
def selectChaptersByPageAndCheckLastestNumFromWeb(cartoonId, totalPage, latestNum):
    """Scrape pages 1..totalPage of the cartoon's chapter listing and return
    a list of chapter dicts (keys: bid, num, content, title, chapter_id)
    whose `num` is strictly greater than *latestNum*.

    A *latestNum* of None or 0 means "nothing stored yet": all chapters
    found on the site are returned.
    """
    chapterUrls = []
    for pageNum in range(1, totalPage + 1):
        soup = baseinfo.getUrlContent(baseinfo.cartoonBaseByCartoonId %(cartoonId, pageNum))
        for chapter in soup.find_all('li', {"class" : "chapter-item"}):
            # Hoisted: the original queried find('span') and find('a')
            # twice per chapter; each .find walks the subtree again.
            num = chapter.find('span').text
            if latestNum is not None and latestNum != 0 and int(latestNum) >= int(num):
                continue  # already stored — skip
            link = chapter.find('a')
            c = {}
            c['bid'] = cartoonId
            c['num'] = num
            c['content'] = getChapterContentByUrl(link['href'])
            c['title'] = link.text
            c['chapter_id'] = chapter.find('input', {"name":"id[]"})['value']
            chapterUrls.append(c)
    return chapterUrls
