#! python3
# Download all XKCD comics (via the xkcd.in Chinese mirror)
import sys

import requests
import bs4
import os
import logging
import threading

from urllib import request

logging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')


def makeDir(path):
    """Ensure the comic download directory exists.

    Uses os.makedirs with exist_ok=True so that missing intermediate
    directories are created too, and so there is no race between an
    existence check and the creation call (os.mkdir would fail if the
    parent directory did not exist).
    """
    os.makedirs(path, exist_ok=True)


def downloadXKCD(path, startNum, endNum):
    """Download XKCD comics numbered startNum..endNum (inclusive) into path.

    Scrapes the Chinese mirror https://xkcd.in/ one comic page at a time,
    saving each image as "<id>-<title>.jpg" inside ``path``. Any failure on
    a single comic (network error, missing image element) is logged and
    skipped so the remaining comics still download.

    :param path: existing directory to save images into
    :param startNum: first comic id to fetch
    :param endNum: last comic id to fetch (inclusive)
    """
    ROOT_URL = "https://xkcd.in/"
    BASE_URL = ROOT_URL + 'comic?lg=cn&id={0}'

    for num in range(startNum, endNum + 1):
        try:
            url = BASE_URL.format(num)
            logging.debug("=========================================================================")
            logging.debug("开始用requests获取第" + str(num) + "页的漫画，地址为：" + url)
            res = requests.get(url)
            res.raise_for_status()
            res.encoding = 'utf-8'

            logging.debug("开始解析 HTML ===================")
            soup = bs4.BeautifulSoup(res.text, features="html.parser")

            logging.debug("开始获取图片信息===================")
            # <img> inside an <a> inside the element with class "comic-body"
            elementsPic = soup.select('.comic-body>a>img')
            if not elementsPic:
                logging.debug("没有找到图片信息===================")
                continue

            elementPic = elementsPic[0]
            # A '/' in the comic title would be treated as a path separator
            # when joined into the file path, so replace it with '-'.
            fileName = "{0}-{1}.jpg".format(str(num), elementPic.get('title')).replace('/', '-')
            # The img src attribute is a path relative to the site root.
            picUrl = ROOT_URL + elementPic.get('src')

            logging.debug("开始下载本页的漫画[" + fileName + "]===================")
            # urlretrieve(url, filename, reporthook) downloads url to filename
            # and invokes fun(block_count, block_size, total_size) after each
            # chunk so the console shows download progress.
            request.urlretrieve(picUrl, os.path.join(path, fileName), fun)

        except Exception as exc:
            # BUG FIX: the original concatenated str + Exception directly,
            # which raised TypeError inside the handler and hid the real error.
            logging.debug('这里有一个异常' + str(exc))
            continue


def fun(blockNum, blockSize, totalSize):
    """Progress reporthook for urllib.request.urlretrieve.

    :param blockNum: number of blocks transferred so far
    :param blockSize: size of each block in bytes
    :param totalSize: total size of the remote file in bytes; urlretrieve
        passes -1 when the server does not report a Content-Length
    """
    if totalSize <= 0:
        # BUG FIX: the original divided by totalSize unconditionally,
        # raising ZeroDivisionError for zero-byte responses and printing
        # negative "progress" when the size is unknown (-1).
        print("download : %d bytes" % (blockNum * blockSize))
        return
    # Clamp to 100%: the final block usually overshoots the exact total.
    percent = min(blockNum * blockSize / totalSize, 1.0) * 100
    print("download : %.2f%%" % percent)


if __name__ == '__main__':

    path = 'F:/download/XkcdPicture'
    makeDir(path)

    # Fan out one worker thread per slice of 10 comic ids
    # (ids 1-10, 11-20, 21-30).
    workers = []
    for start in range(1, 30, 10):
        worker = threading.Thread(target=downloadXKCD, args=(path, start, start + 9))
        workers.append(worker)
        worker.start()

    # Block until every worker has finished before announcing completion.
    for worker in workers:
        worker.join()
    print('Done.')
