'''
Author: focus-on-jiaran-dundundun 331197689@qq.com
Date: 2024-10-20 21:13:57
LastEditors: focus-on-jiaran-dundundun 331197689@qq.com
LastEditTime: 2024-10-26 09:44:37
FilePath: /Desktop/漫画爬虫/download.py
Description: 这是默认设置,请设置`customMade`, 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
'''
import os
from bs4 import BeautifulSoup
import utils
import search
import threading
import time
from curl_cffi import requests
from curl_cffi.requests.exceptions import SSLError
from requests.exceptions import RequestException  # requests 会抛的其他异常
import socket
import argparse

# Path of the site's TLS certificate passed as `verify=` on every request.
# NOTE(review): hard-coded Windows path — confirm it exists on the target machine.
verify = 'E:/certificate/itsacg.crt'
# Request headers sent with every HTTP call (desktop-browser UA + site referer).
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
    'Accept-Language': 'en-US,en;q=0.9',
    'Referer': 'https://www.itsacg.com/plugin.php?id=jameson_manhua'
}
# Base URL of the chapter-reader endpoint; a chapter id (zjid) is appended per request.
openUrl = 'https://www.itsacg.com/plugin.php?id=jameson_manhua&a=read&zjid='
def _get_with_retry(url, retry_msg, error_msg, retries=3):
    """GET *url* with the module-level headers/verify settings, retrying
    transient network failures.

    retry_msg / error_msg are str.format templates receiving `attempt`
    (1-based) and `e` (the exception). Returns the Response only on HTTP
    200; returns None when every attempt failed or a non-transient
    request error occurred.
    """
    for attempt in range(retries):
        try:
            response = requests.get(url, headers=headers, timeout=10, verify=verify)
            if response.status_code == 200:
                return response
        except (SSLError, ConnectionResetError, socket.error) as e:
            print(retry_msg.format(attempt=attempt + 1, e=e))
            time.sleep(2)  # back off before retrying a transient failure
        except RequestException as e:
            print(error_msg.format(e=e))
            return None  # non-transient error: give up immediately
    return None


def getImage(kid_list, name_list, img_path):
    """Download every image of each chapter in *kid_list* into *img_path*.

    kid_list: chapter ids appended to `openUrl` to build the page URL.
    name_list: per-chapter folder names, positionally matching kid_list.
    img_path: root directory; one sub-folder is created per chapter.
    """
    for kid, name in zip(kid_list, name_list):
        folder_path = os.path.expanduser(f'{img_path}/{name}')
        try:
            os.makedirs(folder_path, exist_ok=True)
            print('漫画文件夹创建成功')
        except Exception as e:
            print(f'漫画文件夹创建失败: {e}')
            exit(1)  # in a worker thread SystemExit only ends that thread

        url = f'{openUrl}{kid}&nopage=1'
        response = _get_with_retry(
            url,
            '第 {attempt} 次尝试失败: SSL 错误或连接被重置 - {e}',
            '其他请求异常: {e}',
        )
        if response is None:
            # Bug fix: the original could fall through here with a None
            # response (break on RequestException) and crash on .content.
            print(f'请求失败: {url}')
            continue

        print(url)
        bs = BeautifulSoup(response.content, 'html.parser')
        imglist = bs.find_all('img', {'class': 'lazyload'})

        for img in imglist:
            # Lazy-loaded images keep the real URL in data-src.
            img_url = img.attrs.get('data-src')
            if not img_url:
                print('图片 URL 为空')
                continue
            image_name = utils.extract_image_name(img_url)
            if not image_name:
                print(f'无法提取图片名称: {img_url}')
                continue
            img_response = _get_with_retry(
                img_url,
                '图片请求失败，第 {attempt} 次: {e}',
                '图片请求发生异常: {e}',
            )
            if img_response is None:
                print(f'图片请求最终失败: {img_url}')
                continue
            try:
                with open(os.path.join(folder_path, image_name), 'wb') as f:
                    f.write(img_response.content)
                print(f'下载成功: {img_url}')
            except Exception as e:
                print(f'下载失败: {img_url}, 错误: {e}')
                
if __name__ == '__main__':
    # Command-line interface: storage root plus the search keyword.
    parser = argparse.ArgumentParser(description="爬取图片")
    parser.add_argument('-p', '--path', type=str, default="E:/漫画/", help="存储路径，默认为 E:/漫画/")
    parser.add_argument('-s', '--search', type=str, required=True, help="搜索字符串，如 dearie")
    args = parser.parse_args()

    target_string = args.search
    # All downloads land under <path>/<search-term>/.
    img_path = os.path.join(args.path, target_string)

    while True:
        page_num = search.getPageList(target_string)
        if page_num is None:
            # Single result page: fetch the hits and download them directly.
            print('单页')
            kid_list, name_list = search.inputSearch(target_string)
            if not kid_list:
                print('未找到结果，请重新输入')
                continue
            getImage(kid_list, name_list, img_path)
            break

        # Multiple result pages: one download thread per page.
        print('多页')
        workers = []
        for page in range(1, int(page_num) + 1):
            kid_list, name_list = search.inputSourceByPage(target_string, page)
            worker = threading.Thread(target=getImage, args=(kid_list, name_list, img_path))
            workers.append(worker)
            worker.start()
            time.sleep(10)  # stagger thread starts to throttle the request burst

        for worker in workers:
            worker.join()
        break
