from urllib import request
import json
from bs4 import BeautifulSoup  # Beautiful Soup: a Python library for extracting structured data from HTML/XML documents
# Request headers that mimic a real browser visit, plus the scraper's URL templates.
cartoonIndex = 'http://m.adwin.pw/admin/novel/index.html'  # first page of the novel index
cartoonPageUrl = "http://m.adwin.pw/admin/novel/index.html?page=%s"  # novel index, %s = page number
cartoonBaseByCartoonId = 'http://m.adwin.pw/admin/chapter/index/model_id/1/id/%s.html?page=%s'  # chapter list: %s = novel id, %s = page number
chapterUrl = 'http://m.adwin.pw/admin/chapter/edit/model_id/2/id/%s/articleid/1.html'  # chapter edit page, %s = chapter id
# Spoofed browser headers; the Cookie carries a captured admin session
# (PHPSESSID) so requests are treated as an authenticated browser session.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',
    'Cookie':"last_read_notice_id_17=17; PHPSESSID=om9s8hrjlou94u69v2f6vle6c1; SERVERID=43d0834e2b7ed544e40baf15c363bb5f|1559196151|1559195658",
    'Host':'m.adwin.pw',
    'Upgrade-Insecure-Requests': 1
}
# Pagination arrow glyphs — presumably matched against pagination link text; not used in this chunk (confirm against callers).
PRE_PAGE = '«'
NEXT_PAGE = '»'

def parse(url):
    """Fetch *url* with the module's spoofed browser headers and return it
    as a BeautifulSoup document.

    Raises whatever ``urllib.request`` raises on network or HTTP errors.
    """
    req = request.Request(url, headers=headers)
    # Read the whole response body and decode it as UTF-8 before parsing.
    html = request.urlopen(req).read().decode('utf-8')
    return BeautifulSoup(html, 'html.parser')

def getUrlContent(url):
    """Fetch and parse *url*, retrying once on failure.

    Returns the BeautifulSoup document from ``parse(url)``, or ``None``
    if both the initial attempt and the single retry fail.
    """
    # One initial attempt plus one retry, instead of the same try/except
    # written out twice.
    for attempt in range(2):
        try:
            return parse(url)
        except Exception:
            # Best-effort scraper: log and fall through to the retry /
            # the final None rather than crashing the whole crawl.
            if attempt == 0:
                print("打开地址失败，url：" + url)
            else:
                print("重新打开地址失败，url：" + url)
    return None

def getTotalPages(soup):
    """Return the total page count shown by the page's pagination widget.

    soup: a BeautifulSoup document, or None.

    Returns 1 when there is no document, no ``<ul class="pagination">``,
    or the widget holds only the two arrow items (« and »). Otherwise
    returns the number in the second-to-last ``<li>``, which is the last
    numbered page link before the » arrow.
    """
    if soup is None:
        return 1  # no document -> a single page
    pagination = soup.find('ul', class_='pagination')  # look up once, not twice
    if pagination is None:
        return 1  # no pagination widget -> a single page
    lis = pagination.find_all('li')
    # <= 2 items means only the «/» arrows (or nothing at all); the original
    # indexed lis[-2] here and would raise IndexError on an empty list.
    if not lis or len(lis) <= 2:
        return 1
    # Second-to-last <li> carries the highest page number (the last is »).
    return int(lis[-2].text)