# -*- coding: utf-8 -*-
"""
@Time : 2023/9/24 23:36
@Author : ChenXiaoliang
@Email : middlegod@sina.com
@File : get_all_albums.py
"""

import requests
from bs4 import BeautifulSoup
import os
import time
import logging
import re

# Log line layout, e.g. "INFO 2023-09-24 23:36:00,123 - message".
LOG_FORMAT = "%(levelname)s %(asctime)s - %(message)s"
# All output goes to log.log next to the script; INFO and above is recorded.
logging.basicConfig(filename="log.log", level=logging.INFO, format=LOG_FORMAT)
# Root logger shared by every function in this script.
logger = logging.getLogger()


def current_time():
    """Return the current local time formatted as "MM-DD HH:MM:SS"."""
    time_format = "%m-%d %H:%M:%S"
    return time.strftime(time_format, time.localtime())


def save_pic(direct, pic_src, pic_no):
    """Download one picture and save it as <direct>/<pic_no>.jpg.

    Args:
        direct: directory (the album title) the picture is saved under.
        pic_src: BeautifulSoup <img> tag whose 'src' attribute holds the
            site-relative picture path.
        pic_no: sequential number used as the file name.

    Skips the download when the target file already exists; otherwise
    retries up to 20 times, sleeping 60s after each failure.
    """
    pic_url = "https://www.xrmn03.cc" + pic_src.attrs['src']
    pic_path = os.path.join(direct, str(pic_no) + ".jpg")
    if os.path.exists(pic_path):
        logger.warning("{album}'s No.{num} pic already saved before.SKIPPED!".format(album=direct,
                                                                                     num=pic_no))
        return
    for _ in range(20):
        try:
            # timeout keeps a stalled connection from hanging the crawl forever
            resp = requests.get(url=pic_url, timeout=60)
            # retry on HTTP errors instead of writing an error page to disk
            resp.raise_for_status()
            with open(pic_path, "wb") as img:
                img.write(resp.content)
            logger.info(
                "{album}'s No.{num} pic saved.".format(album=direct, num=pic_no))
        except Exception as err:
            # log the failure instead of swallowing it silently, then back off
            logger.error(f"{current_time()} occur {err}")
            time.sleep(60)
        else:
            break


def get_full_album(album_address, title):
    """Download every picture of one album.

    Args:
        album_address: site-relative album URL ending in ".html".
        title: album title, used as the local directory name.

    Walks the album's pagination — page 0 is the bare address, page N is
    "<address minus .html>_N.html" — until a page without a
    class="content" element appears, saving each <img> via save_pic().
    """
    if not os.path.exists(title):
        os.mkdir(title)
    # BUG FIX: the original used album_address.rstrip(".html"), which strips
    # any trailing '.', 'h', 't', 'm', 'l' characters (a character *set*, not
    # a suffix) and corrupts addresses whose stem ends in one of those letters.
    if album_address.endswith(".html"):
        base_address = album_address[:-len(".html")]
    else:
        base_address = album_address
    # Sequential picture number inside the album (continues across pages).
    pic_no = 1
    for album_page in range(100):
        if album_page == 0:
            album_page_url = "https://www.xrmn03.cc/" + album_address
        else:
            album_page_url = "https://www.xrmn03.cc/" + base_address + "_" + str(album_page) + ".html"
        for _ in range(20):
            try:
                # timeout keeps a dead connection from blocking the crawl
                pics_page = requests.get(url=album_page_url, timeout=60)
            except requests.exceptions.ChunkedEncodingError as err:
                logger.error(f"occur {err}")
                time.sleep(60)
            except requests.exceptions.ConnectionError as err:
                logger.error(f"{current_time()} occur {err}")
                time.sleep(60)
            except requests.exceptions.Timeout as err:
                # new branch: the added timeout raises ReadTimeout, which is
                # not covered by the ConnectionError clause above
                logger.error(f"{current_time()} occur {err}")
                time.sleep(60)
            else:
                pics_page.encoding = "utf-8"
                pics_soup = BeautifulSoup(pics_page.text, 'html.parser')
                content = pics_soup.findAll(class_="content")
                # A page without class="content" means we walked past the
                # last pagination page: the album is complete.
                if not content:
                    logger.info("{album} all downloaded".format(album=title))
                    return
                pics = content[0].findAll('img')
                if pics:
                    for pic in pics:
                        save_pic(title, pic, pic_no)
                        pic_no += 1
                break


# Crawl list pages 1..23; page 1 is the bare index, later pages are indexN.html.
for page in range(1, 24):
    url = "https://www.xrmn03.cc/XiuRen/" if page == 1 else "https://www.xrmn03.cc/XiuRen/index" + str(page) + ".html"
    # timeout so a dead server cannot hang the whole crawl
    resp = requests.get(url=url, timeout=60)
    resp.encoding = "utf-8"
    soup = BeautifulSoup(resp.text, 'html.parser')
    albums = soup.findAll(class_="i_list list_n2")
    logger.info("Page No.{page} starting...".format(page=page))
    # album_no: 1-based position of the album on the current list page
    album_no = 1
    for album in albums:
        link = album.find('a')
        album_url = link.attrs['href']
        album_title = link.attrs['title']
        logger.info("No.{no} album starting...".format(no=album_no))
        # Skip the album when the number of files already on disk matches the
        # picture count embedded in the title (e.g. "... 58P ...").
        if os.path.exists(album_title):
            match = re.search(r'\d{2,3}P', album_title)
            # BUG FIX: guard against titles without an "NNP" count — the
            # original called .group() on None and crashed with AttributeError.
            if match and len(os.listdir("./" + album_title)) == int(match.group().strip("P")):
                logger.info("No.{no} album all already have saved before!".format(no=album_no))
                # use the real page size instead of the hard-coded 30
                album_left = len(albums) - album_no
                if album_left > 0:
                    logger.info(f"当前分页{page}，还剩{album_left}套待下载")
                album_no += 1
                continue
        get_full_album(album_url, album_title)
        logger.info("No.{no} album all saved!".format(no=album_no))
        album_left = len(albums) - album_no
        if album_left > 0:
            logger.info(f"当前分页{page}，还剩{album_left}套待下载")
        album_no += 1
    logger.info("Page No.{page} completed".format(page=page))
