from ast import If
from dataclasses import dataclass
from unicodedata import name
from bs4 import BeautifulSoup # 网页解析,获得数据
import re # 正则表达式,进行文字匹配
import urllib.request,urllib.error # 制定url获取网页数据
import json
import js2py
import execjs
import requests


'''
环境:
pip install 'h5py<3.0.0' -i https://pypi.tuna.tsinghua.edu.cn/simple

说明: 
注意,传参url未必一定非得是url字符串,因为python的特性,传参数据类型是随意的,所以也可以传字典,数组;
    不过必须将字典数组转换为json字符串,且用 ### 代替 " 或 ' 号, 
    然后再接受参数的方法中用正则转义为原始字符串即可;
    
比如: 
字典 : {'name': 'comic漫漫', 'title': 'comic漫漫','age': 1}
转义为字符串: {###name###: ###comic漫漫###, ###title###: ###comic漫漫###,###age###: 1}
'''

'''
通用模块
'''
## Pre-compiled regular expressions shared by the scraping functions below.
# Extracts the jump URL from an inline onclick="viewComic('...')" handler.
indexComicJumpurl = re.compile(r'onclick="viewComic\(\'(.*?)\'\)')
# Matches CR/LF characters so they can be collapsed to spaces in JSON output.
# NOTE: the previous pattern r'[\r\n(\r\n)]' was a character class that also
# matched literal '(' and ')' and silently mangled parentheses in the data.
jsonRe = re.compile(r'[\r\n]')
# Captures the total hit count from a search-result title like 相近搜索結果（123）.
totalSearchRe = re.compile(r'相近搜索結果（(.*?)）')
# Captures the chapter count from a heading like 共123章.
totalNumRe = re.compile(r'共(.*?)章')

## 公共方法
# 得到指定一个URL的网页内容
def askURL(url):
    """Return the decoded HTML body of *url*, or '' on any URL error.

    Browser-like User-Agent and referer headers are sent so mangabz.com
    does not reject the request as a bot.  HTTP and network failures are
    logged to stdout and result in an empty string instead of raising.
    """
    head = {
        "User-Agent": "Mozilla / 5.0(Windows NT 10.0; Win64; x64) AppleWebKit / 537.36(KHTML, like Gecko) Chrome / 80.0.3987.122  Safari / 537.36",
        "referer": 'https://www.mangabz.com/'
    }
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        # Context manager guarantees the response socket is closed
        # (the original never closed the response object).
        with urllib.request.urlopen(request) as response:
            html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        # Log the HTTP status code and/or failure reason, then fall
        # through to return the empty string.
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html

# Serialize a result structure to a one-line, ```-fenced JSON string.
def resultHandle(results):
    """Return *results* as JSON wrapped in ``` markers, newlines flattened.

    The unicode_escape round-trip turns \\uXXXX escapes back into readable
    CJK characters.  NOTE(review): it also unescapes \\" and \\\\ inside
    string values and breaks non-BMP characters — tolerable for this
    site's data, but json.dumps(..., ensure_ascii=False) would be the
    robust alternative.
    """
    jsonResults = json.dumps(results).encode('utf-8').decode("unicode_escape")
    # Collapse real CR/LF characters to spaces.  The module-level jsonRe
    # pattern also swallowed '(' and ')'; this class matches only newlines.
    jsonResults = re.sub(r'[\r\n]', ' ', jsonResults)
    return "```" + jsonResults + "```"


# Walk every page of one chapter and collect the image URLs.
def requestPictureUrl(url, flag):
    """Return the list of image URLs for the chapter at *url*.

    url  -- chapter URL ending with '/' (the trailing character is dropped
            before the '-pN/' page suffix is appended).
    flag -- when falsy, skip the (slow) crawl entirely and return [].

    Each page's URLs come from 'chapterimage.ashx', called with the
    CID/MID/DT/SIGN tokens scraped from the first page's inline JS; the
    endpoint answers with obfuscated JavaScript that execjs evaluates to
    a list of URLs.
    """
    if not flag:
        return []
    # One session keeps the TCP connection alive across many requests.
    session = requests.Session()
    url = url[0 : len(url) - 1]  # drop the trailing '/'
    page_no = 1
    img_urls = []
    headers = {  # browser-like headers so the site serves real content
        "User-Agent": "Mozilla / 5.0(Windows NT 10.0; Win64; x64) AppleWebKit / 537.36(KHTML, like Gecko) Chrome / 80.0.3987.122  Safari / 537.36",
        "referer": 'https://www.mangabz.com/'
    }
    # The signing tokens are embedded once in the first page and stay valid
    # for the whole chapter, so fetch them a single time.  Capture groups
    # replace the original lstrip/rstrip calls, which strip character SETS
    # (not prefixes) and would corrupt values starting with those letters.
    first_page = session.get(url + '-p1/', headers=headers).text
    cid = re.search(r'var MANGABZ_CID=([^;]*);', first_page).group(1)
    _mid = re.search(r'var MANGABZ_MID=([^;]*);', first_page).group(1)
    _dt = re.search(r'var MANGABZ_VIEWSIGN_DT="([^"]*)";', first_page).group(1)
    _sign = re.search(r'var MANGABZ_VIEWSIGN="([^"]*)";', first_page).group(1)
    while True:
        img_ajax_url = url + '-p' + str(page_no) + '/chapterimage.ashx'
        params = {
            'cid': cid,
            'page': page_no,
            'key': '',
            '_cid': cid,
            '_mid': _mid,
            '_dt': _dt,
            '_sign': _sign
        }
        ajax_r = session.get(img_ajax_url, headers=headers, params=params).content.decode()
        # Evaluate the returned JS expression to a list of image URLs.
        img_url = execjs.eval(ajax_r)
        if img_url:
            img_urls.append(img_url[0])
        # Guard against a None/empty result (the original crashed on
        # len(None)) and cap the crawl at 500 pages as a safety net.
        if not img_url or len(img_url) <= 1 or page_no >= 500:
            break
        page_no += 1
    return img_urls

'''
主要模块
'''
## 爬取首页漫画 : 爬取到首页推荐漫画对应的跳转url
'''
返回数据格式为json字符串,其中包含以下字段
title : 漫画名称
author: 作者
picture: 图片url
jumpurl: 跳转url地址
#以下字段为预留字段,非必须爬取,若未爬取将字段设置为空字符: ''
updateState: 更新状态信息: 连载中 完结
updateNum: 目前更新最新话,如果漫画已完结则爬取总集数
updateChapter: 章节名称 
type: 漫画类型
'''
def indexComic():
    """Return the top-10 recommended comics from the mangabz homepage.

    The result is a ```-fenced JSON string (see resultHandle).  Fields the
    homepage does not expose (updateState, updateNum, updateChapter) stay
    as empty strings, as the downstream contract requires.
    """
    baseurl = "https://www.mangabz.com"
    # Fetch and parse the homepage carousel.
    html = askURL(baseurl)
    soup = BeautifulSoup(html, "html.parser")
    mainContent = soup.select('.carousel-right-list')
    mainList = mainContent[0].find_all(class_='carousel-right-item', limit=10)
    results = []
    for item in mainList:
        result = {"title": "", "author": "", "picture": "", "jumpurl": "", "updateState": "", "updateNum": "", "updateChapter": "", "type": ""}
        result["picture"] = str(item.select('img')[0]['src'])
        result["jumpurl"] = baseurl + str(item.a['href'])
        result["title"] = str(item.select('.carousel-right-item-title')[0].a.text)
        result["author"] = str(item.select('.carousel-right-item-subtitle')[0].text)
        # Join the tag spans with ',' in last-to-first order (the original
        # accumulated them in reverse into a local named `type`, shadowing
        # the builtin).
        tags = [tag.text for tag in item.select('.carousel-right-item-tag')[0].select('span')]
        result["type"] = ','.join(reversed(tags))
        results.append(result)
    return resultHandle(results)


## 根据关键字搜索漫画 : 根据关键字查询后,获取查询到的漫画的url
'''
返回数据格式为json字符串,其中包含以下字段
{
total: totalPage: 总页数 ,这里不关心每页显示条数,但是如果能控制每页显示条数的话,建议每页显示50条
data: [{
title : 漫画名称
author: 作者
picture: 图片url
jumpurl: 跳转url地址
#以下字段为预留字段,非必须爬取,若未爬取将字段设置为空字符: ''
updateState: 更新状态信息: 连载中 完结
updateNum: 目前更新最新话,如果漫画已完结则爬取总集数
updateChapter: 章节名称 
type: 漫画类型
}]
}

'''
def keywordSearch(keyword, pageSize):
    """Search mangabz for *keyword* and return one page of results.

    keyword  -- search text; URL-quoted before being sent.
    pageSize -- despite its name, this is the 1-based page INDEX passed
                straight to the site's ?page= query parameter.

    Returns a ```-fenced JSON string:
    {"total": <number of result pages>, "data": [<comic dicts>]}.
    """
    baseurl = 'http://www.mangabz.com/search?title={keywordStr}&page={pageSizeStr}'
    baseurlOrigin = "https://www.mangabz.com"
    # The site shows a fixed 12 results per page.
    baseSize = 12
    html = askURL(baseurl.format(keywordStr=urllib.parse.quote(keyword), pageSizeStr=pageSize))
    soup = BeautifulSoup(html, "html.parser")
    jsonObject = soup.select('.mh-list')[0].select('.mh-item')
    totalHtml = soup.select('.result-title')[0].text
    totalItem = int(re.findall(totalSearchRe, str(totalHtml))[0])
    # Ceiling division; the original repeated the literal 12 here instead
    # of using the baseSize constant it had just defined.
    total = (totalItem + baseSize - 1) // baseSize
    results = {"total": total, "data": []}
    for item in jsonObject:
        # (`object` shadowed the builtin in the original loop.)
        result = {"title": "", "author": "", "picture": "", "jumpurl": "", "updateState": "", "updateNum": "", "updateChapter": "", "type": ""}
        result["title"] = str(item.select('.title')[0].a.text)
        result["author"] = ''
        result["picture"] = str(item.select('img')[0]['src'])
        result["jumpurl"] = baseurlOrigin + str(item.select('.title')[0].a['href'])
        result["updateState"] = str(item.select('.chapter')[0].span.text)
        result["updateNum"] = str(item.select('.chapter')[0].a.text)
        results["data"].append(result)
    return resultHandle(results)


## 检索某一部漫画的详细信息 : 根据url,来获取到漫画的章节详细信息
'''
返回数据格式为json字符串,其中包含以下字段
title : 漫画名称
author: 作者
picture: 图片url
describe: 漫画简介
#以下字段为预留字段,非必须爬取,若未爬取将字段设置为空字符: ''
updateState: 更新状态信息: 连载中 完结
updateNum: 目前更新最新话,如果漫画已完结则爬取总集数
type: 漫画类型
country: 国家
total: 章节总页数  
chapterInfo: [
    {
        'chapterName': 章节名称
        'chapterUrl': 章节转跳url
    }
]
'''
def chapterInfoComic(baseurl, pageIndex):
    """Return metadata and the full chapter list for the comic at *baseurl*.

    pageIndex is accepted for interface compatibility but unused: mangabz
    lists every chapter on a single page, so 'total' is always 1.
    Returns a ```-fenced JSON string (see resultHandle).
    """
    firstUrl = 'https://www.mangabz.com/'
    html = askURL(baseurl)
    soup = BeautifulSoup(html, "html.parser")
    comicInfoHtml = soup.find_all(name='div', class_='detail-info-1')[0]
    allChapterHtml = soup.select('body > .container')[0]
    describeHtml = soup.find_all(name='div', class_='detail-info-2')[0]
    resultChapterInfo = []
    results = {'title': '', 'author': '', 'picture': '', 'updateState': '', 'updateNum': '', 'type': '', 'describe': '', 'country': '', 'total': '', 'chapterInfo': resultChapterInfo}
    # Chapter list: every entry links to a chapter reader page.
    for item in allChapterHtml.select('.detail-list-form-item'):
        resultChapterInfo.append({
            'chapterName': str(item.text.replace(' ', '')),
            'chapterUrl': str(firstUrl + item['href']),
        })
    # The info <span>s hold author / state / tags in fixed order; the
    # original re-ran this long select chain once per field.
    infoSpans = comicInfoHtml.select('.detail-info')[0].select('.detail-info-tip')[0].select('.detail-info-tip > span')
    results["author"] = infoSpans[0].a.text
    results["country"] = ''
    results["updateState"] = infoSpans[1].span.text
    results["updateNum"] = re.findall(totalNumRe, str(allChapterHtml.select('.detail-list-form-title')[0].text))[0]
    results["picture"] = comicInfoHtml.img['src']
    results["title"] = comicInfoHtml.select('.detail-info-title')[0].text.replace(' ', '')
    results["describe"] = describeHtml.select('.detail-info-content')[0].text
    # Join the tag spans with ',' in last-to-first order (the original
    # accumulated them in reverse into a local named `type`, shadowing the
    # builtin).
    tags = [tag.text for tag in infoSpans[2].select('span')]
    results["type"] = ','.join(reversed(tags))
    results['total'] = 1
    return resultHandle(results)

## 检索某一部漫画的具体某一章节的所有图片 : 爬取漫画章节的url,获取详细的图片路径
'''
返回数据格式为json字符串,其中包含以下字段
title : 漫画名称
name: 本章节名
pictureUrl: ['',''] 图片url
nextChapterUrl: 下一话章节url 到最后一章则传递 **** 字符
nextName: 下一章节名称 到最后一章则传递 **** 字符
previousChapterUrl: 上一话章节url 到第一章则传递 **** 字符
previousName: 上一章节名称 到第一章则传递 **** 字符
# 非必须字段
chapterUrl : 漫画详细信息url
'''
def pictureInfoComic(baseurl):
    """Return the image list and prev/next navigation for the chapter page.

    baseurl -- URL of one chapter's reader page.

    Missing prev/next chapters are signalled with the literal '****', as
    the downstream contract requires.  Returns a ```-fenced JSON string.
    (The original also defined an unused `comicUrl` local; removed.)
    """
    firstUrl = 'https://www.mangabz.com'
    html = askURL(baseurl)
    soup = BeautifulSoup(html, "html.parser")
    container = soup.find_all(class_='container')[1]
    # Crawl every page of the chapter for its image URLs.
    pictureUrl = requestPictureUrl(baseurl, True)
    results = {'title': '', 'name': '', 'chapterUrl': '', 'nextChapterUrl': '', 'nextName': '', 'previousChapterUrl': '', 'previousName': '', 'pictureUrl': pictureUrl}
    # Current chapter name, comic title (part before '_' in <title>), and
    # the link back to the comic's detail page.
    results['name'] = soup.select('.top-title')[0].text.strip()
    results['title'] = soup.select('title')[0].text.split('_')[0].strip()
    results['chapterUrl'] = firstUrl + soup.select('.top-title')[0].a['href']
    # Prev/next links are located via their fixed arrow-icon image URLs;
    # the surrounding <a> holds the target chapter path.
    nextContainer = container.find(attrs={'src': 'https://css.mangabz.com/v202207231947/mangabz/images/icon_xiayizhang.png'})
    previousContainer = container.find(attrs={'src': 'https://css.mangabz.com/v202207231947/mangabz/images/icon_shangyizhang.png'})
    if previousContainer is None:
        results["previousChapterUrl"] = '****'
        results["previousName"] = '****'
    else:
        results["previousChapterUrl"] = firstUrl + previousContainer.parent['href']
        results["previousName"] = ''
    if nextContainer is None:
        results["nextChapterUrl"] = '****'
        results["nextName"] = '****'
    else:
        results["nextChapterUrl"] = firstUrl + nextContainer.parent['href']
        results["nextName"] = ''
    return resultHandle(results)

'''
main模块
'''
### Entry point: choose which scraper to exercise by (un)commenting one of
### the numbered sections below; currently section 4 (pictureInfoComic) runs.
if __name__ == "__main__":
    '''1'''
    # print(str(indexComic()))
    '''2'''
    # print(keywordSearch('你好',1))
    '''3'''
    # baseurl1 = 'http://manhua.dmzj.com/njsjm/'
    # baseurl2 = 'https://manhua.dmzj.com/putonggongjishiquantigongjierqienengercigongjidema'
    # baseurl1 = 'https://www.mangabz.com/121bz/'
    # print(chapterInfoComic(baseurl1, 1))
    '''4'''
    # url1 = 'https://manhua.dmzj.com/putonggongjishiquantigongjierqienengercigongjidema/64616.shtml'
    # url1 = 'https://www.mangabz.com//m258050/'
    url1 = 'https://www.mangabz.com//m91429'
    # url1 = 'https://www.mangabz.com/m143004/'
    print(pictureInfoComic(url1))

