'''
Created on 2018年1月14日

@author: shily
'''

from builtins import IOError
from bs4 import BeautifulSoup
import requests
from mmjpgConfig import logger, basePath, imgType, totalPages, tag


# 获取图集url
def getPageUrl(pageNum):
    """Fetch one page of the atlas listing API and append each
    atlas "title@href" pair to basePath/atlasUrl.txt.

    :param pageNum: 1-based page number to request.
    """
    url = "http://m.mmjpg.com/data.php?id=" + str(imgType) + "&page=" + str(pageNum)
    # Request the URL and process the response
    try:
        response = requests.get(url, timeout=5)
        if response.status_code != 200:
            logger.error("请求{0}失败,状态码{1}".format(url, response.status_code))
            return
        # Parse the returned HTML
        soup = BeautifulSoup(response.content, 'lxml')
        titles = soup.select("li > h2 > a")
        # File is created on first use; entries are appended across pages
        detailFilePath = "{0}/atlasUrl.txt".format(basePath)
        try:
            with open(detailFilePath, 'a', encoding='UTF-8') as file:
                written = 0
                for item in titles:
                    href = item.get("href")  # hoisted: avoid double attribute lookup
                    if href and item.text:
                        file.write("{0}@{1}\n".format(item.text, href))
                        written += 1
                # Log the count of entries actually written — items missing an
                # href or text are skipped, so len(titles) could overcount.
                logger.info("第{0}页detail文件写入完毕,共{1}个url ".format(pageNum, written))
        except IOError as e:
            logger.error("%s打开/创建失败" % detailFilePath)
            logger.error(e)
    except requests.exceptions.RequestException as e:
        logger.error("请求%s出错" % url)
        logger.error(e)
        
# 获取指定标签图集url
def getTagPageUrl(pageNum):
    """Fetch one page of atlas listings for the configured tag and
    append each atlas "title@href" pair to basePath/atlasUrl.txt.

    :param pageNum: 1-based page number to request.
    """
    url = "http://m.mmjpg.com/tag/" + tag + "/" + str(pageNum)
    # Request the URL and process the response
    try:
        response = requests.get(url, timeout=5)
        if response.status_code != 200:
            logger.error("请求{0}失败,状态码{1}".format(url, response.status_code))
            return
        # Parse the returned HTML
        soup = BeautifulSoup(response.content, 'lxml')
        titles = soup.select("li > h2 > a")
        # File is created on first use; entries are appended across pages
        detailFilePath = "{0}/atlasUrl.txt".format(basePath)
        try:
            with open(detailFilePath, 'a', encoding='UTF-8') as file:
                for item in titles:
                    href = item.get("href")  # hoisted: avoid double attribute lookup
                    if href and item.text:
                        file.write("{0}@{1}\n".format(item.text, href))
                logger.info("第%s页detail文件写入完毕 " % pageNum)
        except IOError as e:
            logger.error("%s打开/创建失败" % detailFilePath)
            logger.error(e)
    except requests.exceptions.RequestException as e:
        logger.error("请求%s出错" % url)
        logger.error(e)
        
def getPageUrls():
    """Crawl every listing page from 1 to totalPages (inclusive)."""
    page = 1
    while page <= totalPages:
        getPageUrl(page)
        page += 1
		
def getTagPageUrls():
    """Crawl every tag listing page from 1 to totalPages (inclusive)."""
    page = 1
    while page <= totalPages:
        getTagPageUrl(page)
        page += 1



    
    
