'''
Created on 2017-12-29

@author: shily
'''
import requests
import os
import re
from bs4 import BeautifulSoup
import threading
import queue
import time
from builtins import IOError
import linecache
from mmjpgConfig import logger, basePath

baseUrl = 'http://m.mmjpg.com'
'''
Multi-threaded download worker class
'''
class threadDownload(threading.Thread):
    '''
    Worker thread that drains a queue of image URLs and downloads each one
    via getImgByUrl.
    '''
    def __init__(self, que, filePath, referer, no):
        threading.Thread.__init__(self)
        self.que = que            # queue.Queue of image URLs (thread-safe)
        self.filePath = filePath  # directory the images are saved into
        self.referer = referer    # Referer header value required by the image host
        self.no = no              # worker index (informational only)
    def run(self):
        # Bug fix: the original empty()-then-get() pair is racy — another
        # worker can drain the queue between the two calls, leaving this
        # thread blocked forever in get().  get_nowait() + queue.Empty is
        # the atomic equivalent.
        while True:
            try:
                imgUrl = self.que.get_nowait()
            except queue.Empty:
                break
            getImgByUrl(imgUrl, self.filePath, self.referer)
            
'''
Get the URL behind a redirect (not needed for now)
'''
def getRedirectsImgUrl(imgUrl):
    '''
    Resolve the real image URL behind a 302 redirect (currently unused).

    Returns the Location header value when the server answers with 302,
    otherwise logs a warning and returns False.
    '''
    headers = {
        'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
    }
    try:
        # Redirects are disabled so the 302 response itself is observable.
        response = requests.get(imgUrl.strip(), timeout = 10, headers = headers, allow_redirects=False)
    except requests.exceptions.RequestException as e:
        logger.warn("请求%s出错" % imgUrl)
        logger.warn(e)
    else:
        if response.status_code == 302:
            return response.headers["Location"]
        logger.warn("未获取到%s重定向信息" % imgUrl)
    return False



'''
Download an image.
A Referer header must be added to the request, otherwise the download fails.
'''
def getImgByUrl(imgUrl, filePath, referer):
    '''
    Download a single image to filePath.

    The image host (fm.shiyunjj.com) rejects requests without a Referer
    header, so one is always sent.  Files that already exist are skipped.
    Failed downloads are retried, for up to three attempts in total — the
    original loop condition (`attempts < 3`) only ever allowed two.
    '''
    # Request headers; Referer is mandatory for this host.
    headers = {
        'Accept' : 'image/webp,image/*,*/*;q=0.8',
        'Accept-Encoding' : 'gzip, deflate, sdch',
        'Accept-Language' : 'zh-CN,zh;q=0.9',
        'Connection' : 'keep-alive',
        'Host' : 'fm.shiyunjj.com',
        'Referer' : referer,
        'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
    }
    # File name is the last path component of the URL.
    imgName = imgUrl[imgUrl.rfind('/')+1:]
    imagePath = (filePath + "/" + imgName).strip()
    # Skip files that were downloaded on a previous run.
    if os.path.exists(imagePath):
        logger.debug("%s已存在,跳过" % imagePath)
        return
    attempts = 1
    # Bug fix: was `attempts < 3`, which capped retries at two tries
    # despite the comment promising three.
    while attempts <= 3:
        try:
            image = requests.get(imgUrl.strip(), timeout = 10, headers = headers, allow_redirects=False)
            if image.status_code == 200:
                # with-statement closes the handle even if write() raises.
                with open(imagePath, 'wb') as imgFile:
                    imgFile.write(image.content)
                logger.debug("%s下载完毕" % imgName)
                break
            else:
                attempts += 1
                logger.info("请求{0}失败,状态码{1}".format(imgUrl, image.status_code))
                if attempts <= 3:
                    logger.info("尝试第{0}次下载".format(attempts))
        except requests.exceptions.RequestException as e:
            logger.warn("请求%s出错" % imgUrl)
            logger.warn(e)
            attempts += 1
            if attempts <= 3:
                logger.info("尝试第{0}次下载".format(attempts))
        
'''
Parse the link of every image from the given album URL, for albums whose
image links follow the regular pattern imageBaseUrl/x.jpg
'''
def getAtlasUrls(atlasUrl):
    '''
    Parse an album page and build the URL of every image in the album,
    for albums whose image URLs follow the regular pattern
    imageBaseUrl/<n>.jpg.

    Returns a list of image URLs, or an empty list when the page cannot
    be fetched or parsed.  (The original raised NameError in those paths,
    because imageBaseUrl/totalPages were never bound before the return.)
    '''
    imgUrls = []
    try:
        # Relative album paths are resolved against the site root.
        if 'http' not in atlasUrl:
            atlasUrl = baseUrl + atlasUrl
        response = requests.get(atlasUrl, timeout = 5)
        if response.status_code == 200:
            # Parse the returned HTML.
            soup = BeautifulSoup(response.content, 'lxml')
            firstImage = soup.select_one("div.content > div > a > img")
            pageInfo = soup.select_one("div.contentpage > span > i")
            # select_one returns None on a missing node — guard before .get()/.text.
            if firstImage is not None and pageInfo is not None \
                    and firstImage.get('src') and pageInfo.text:
                src = firstImage.get('src')
                imageBaseUrl = src[:src.rfind('/')+1]
                # Page counter text looks like "x/NN)" — take the part after '/'.
                totalPages = int(pageInfo.text[pageInfo.text.find('/') + 1:pageInfo.text.find(')')])
                imgUrls = [imageBaseUrl + str(count) + '.jpg' for count in range(1, totalPages + 1)]
                logger.info("{0}图集图片链接解析完成,共{1}张图片".format(atlasUrl, totalPages))
            else:
                logger.error("%s图集图片链接解析解析出错" % atlasUrl)
        else:
            logger.warn("请求{0}失败,状态码{1}".format(atlasUrl, response.status_code))
    except requests.exceptions.RequestException as e:
        logger.warn("请求%s出错" % atlasUrl)
        logger.warn(e)
    return imgUrls

'''
Parse the image link contained in the given page URL
'''
def getOneImgUrl(pageurl):
    '''
    Parse a single album page and return the image URL it contains,
    or None when the page cannot be fetched or parsed.
    '''
    try:
        response = requests.get(pageurl, timeout = 5)
        if response.status_code == 200:
            # Parse the returned HTML.
            soup = BeautifulSoup(response.content, 'lxml')
            image = soup.select_one("div.content > div > a > img")
            # Guard against a missing <img> node: select_one returns None,
            # and the original would have raised AttributeError on .get().
            if image is not None and image.get('src'):
                return image.get('src').replace(" ", "")
            else:
                logger.error("%s图集图片链接解析解析出错" % pageurl)
        else:
            logger.warn("请求{0}失败,状态码{1}".format(pageurl, response.status_code))
    except requests.exceptions.RequestException as e:
        logger.warn("请求%s出错" % pageurl)
        logger.warn(e)
    # Explicit None on every failure path (was an implicit fall-through + `pass`).
    return None

'''
Parse the link of every image from the given atlasUrl, for albums whose
image links are irregular
'''
def getIrregularAtlasUrls(atlasUrl):
    '''
    Parse an album page and collect the URL of every image, for albums
    whose image URLs do NOT follow a regular pattern: each page of the
    album is fetched individually via getOneImgUrl.

    Returns a list of image URLs (empty on any failure).
    '''
    imgUrls = []
    try:
        # Relative album paths are resolved against the site root.
        if 'http' not in atlasUrl:
            atlasUrl = baseUrl + atlasUrl
        response = requests.get(atlasUrl, timeout = 5)
        if response.status_code == 200:
            # Parse the returned HTML.
            soup = BeautifulSoup(response.content, 'lxml')
            pageInfo = soup.select_one("div.contentpage > span > i")
            # select_one returns None on a missing node — guard before .text.
            if pageInfo is not None and pageInfo.text:
                # Page counter text looks like "x/NN)" — take the part after '/'.
                totalPages = int(pageInfo.text[pageInfo.text.find('/') + 1:pageInfo.text.find(')')])
                pageUrls = (atlasUrl + "/" + str(page) for page in range(1, totalPages + 1))
                # Bug fix: materialize the lazy map and drop failed pages —
                # getOneImgUrl returns None on error, and a None in the
                # result used to crash the downstream download worker.
                imgUrls = [url for url in map(getOneImgUrl, pageUrls) if url]
                logger.info("{0}图集图片链接解析完成,共{1}张图片".format(atlasUrl, totalPages))
            else:
                logger.error("%s图集图片链接解析解析出错" % atlasUrl)
        else:
            logger.warn("请求{0}失败,状态码{1}".format(atlasUrl, response.status_code))
    except requests.exceptions.RequestException as e:
        logger.warn("请求%s出错" % atlasUrl)
        logger.warn(e)
    return imgUrls
'''
Receives the image download path and the atlasUrl, and uses the
multi-threaded download class to fetch the images.
'''
def downloadAtlas(filePath, atlasUrl):
    '''
    Download every image of one album into filePath, fanning the work out
    to threadDownload workers (capped at ~10 live threads).
    '''
    if 'http' not in atlasUrl:
        atlasUrl = baseUrl + atlasUrl
    # Thread-safe queue holding the image URLs to download.
    urlQue = queue.Queue()
#     for url in getAtlasUrls(atlasUrl):
    for url in getIrregularAtlasUrls(atlasUrl):
        urlQue.put(url)
    workers = []
    for i in range(0, urlQue.qsize()):
        # Cap the number of live threads at 10; bug fix: the original
        # `while True` throttle loop busy-spun a CPU core — sleep briefly
        # between checks instead.
        while threading.active_count() >= 10:
            time.sleep(0.1)
        # Referer mirrors the album page URL of image i, as the host requires.
        threadD = threadDownload(urlQue, filePath, atlasUrl + "/" + str(i), i)
        threadD.start()
        workers.append(threadD)
    # join() replaces the original active_count()-polling loop: completion
    # is known exactly when every spawned worker has finished.
    for threadD in workers:
        threadD.join()
    logger.info("%s下载完成" % filePath)
    # Pause between albums — presumably a politeness throttle for the
    # remote host (kept from the original); TODO confirm it is still wanted.
    logger.debug("线程休眠")
    time.sleep(3)
    logger.debug("休眠结束")

# Create a folder for each entry's URL and title.
# atlasUrlFile: the file listing titles and URLs.
def mkDirAndDownload(atlasUrlsFile):
    '''
    Read "title@url" lines from atlasUrlsFile, create one directory per
    album under basePath, and download each album into its directory.

    atlasUrlsFile -- path to the text file listing album titles and URLs,
                     one "title@url" pair per line.
    '''
    # Make sure the download root exists (best-effort: a failure is logged
    # and processing continues, matching the original behaviour).
    if not os.path.exists(basePath):
        try:
            os.makedirs(basePath)
            logger.debug("%s创建成功" % basePath)
        except OSError as e:
            logger.error("目录%s不存在且创建失败" % basePath)
            logger.error(e)
    try:
        # Read the whole file once instead of the original per-line
        # linecache.getline() calls (linecache caches the full file anyway).
        # utf-8 matches linecache's default decoding for unmarked files.
        with open(atlasUrlsFile, encoding='utf-8') as f:
            lines = f.readlines()
        count = len(lines)
        logger.info("共 %s个URL" % count)
        for currentCount, line in enumerate(lines, start=1):
            tArr = line.split("@")
            if len(tArr) == 2:
                # Strip characters that are illegal in Windows directory names.
                title = re.sub(r'[\/:*?"<>|]', '', tArr[0].strip())
                atlasUrl = tArr[1].strip('\n')
                path = basePath + "/" + title
                try:
                    # Only download albums whose directory does not exist yet.
                    if not os.path.exists(path):
                        os.makedirs(path)
                        downloadAtlas(path, atlasUrl)
                    else:
                        logger.debug("%s已存在,跳过" % path)
                    # Text progress bar: "current/total percent [****   ]".
                    s1 = "\r%d/%d    %f%%[%s%s]"%(currentCount, count, (currentCount/count) * 100,"*"*int((currentCount/count) * 100)," "*int(((count - currentCount)/count) * 100))
                    logger.info(s1)
                except OSError as e:
                    logger.error("%s创建失败!" % path)
                    logger.error(e)
            else:
                logger.info("%s格式错误!" % line)
        logger.info("%s处理完成" % atlasUrlsFile)
    except IOError as e:
        logger.error("%s打开失败" % atlasUrlsFile)
        logger.error(e)
def startDownload():
    '''Entry point: process the atlasUrl.txt listing stored under basePath.'''
    urlListFile = basePath + "/atlasUrl.txt"
    mkDirAndDownload(urlListFile)













