import csv
import logging
import os
import time
import urllib.request
from typing import Literal, Union

import requests
from bs4 import BeautifulSoup

# Working directory: root for the log file and the downloaded-image folders.
cwd = os.getcwd()

# Allowed file modes for the log FileHandler: 'w' (overwrite) or 'a' (append).
# Fixed: the original `Union['w', 'a']` treated the strings as forward type
# references; `Literal` is the correct construct for a set of allowed values.
LOG_MODE = Literal['w', 'a']


# 获取要下载到的本地目录
# Compute the local destination directory for a page URL,
# e.g. 'http://host/a/b/c.shtml' -> '<cwd>/dest/a_b_c'.
def get_dir(url: str):
    # Drop the scheme separator and the '.shtml' suffix, then split on '/':
    # index 0 is 'scheme:host', indices 1..3 are the first three path segments.
    # NOTE(review): assumes the URL has at least three path segments after the
    # host; shorter URLs raise IndexError — confirm against the CSV contents.
    url_arr = url.replace('//', '').replace('.shtml', '').split('/')
    # os.path.join instead of hard-coded '\\' so the path is also valid on
    # non-Windows platforms (the result is identical on Windows).
    dest_dir = os.path.join(cwd, 'dest', f'{url_arr[1]}_{url_arr[2]}_{url_arr[3]}')
    return dest_dir


class Main:
    """Batch image downloader.

    Reads page URLs from a CSV file, fetches each page, and saves every
    absolutely-addressed ``<img>`` it references into a per-URL local folder.
    """

    def __init__(self):
        self.init_log()

    # Initialize logging: DEBUG-level console output plus a default
    # append-mode log file in the working directory.
    # NOTE(review): this configures the *root* logger, so constructing
    # several Main instances stacks duplicate handlers and duplicates
    # every log line — consider logging.getLogger(__name__) instead.
    def init_log(self):
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.DEBUG)
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        self.logger.addHandler(console_handler)
        self.add_filehandler(f'{cwd}\\default.log', 'a')

    def add_filehandler(self, filename: str = 'default.log', mode: LOG_MODE = 'w'):
        """Attach a UTF-8 file handler to the logger.

        :param filename: path of the log file.
        :param mode: 'w' to overwrite the file, 'a' to append.
        """
        self.file_handler = logging.FileHandler(filename=filename, mode=mode, encoding='utf-8')
        self.file_handler.setLevel(logging.DEBUG)
        LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
        self.file_handler.setFormatter(logging.Formatter(LOG_FORMAT))
        self.logger.addHandler(self.file_handler)

    # Run the whole task: process every URL listed in the CSV file.
    def run(self, file_path):
        self.logger.info('开始任务')
        self.process(file_path)
        self.logger.info('结束任务')

    def process(self, file_path):
        """Iterate the CSV at *file_path* and download images for each URL.

        The first column of each row is the page URL; a literal 'url' value
        (the header row) is skipped.
        """
        eq = 0
        # NOTE(review): no explicit encoding — the CSV is read with the
        # locale default; confirm the file's actual encoding if non-ASCII
        # rows are mis-read.
        with open((file_path), newline='') as csvfile:
            reader = csv.reader(csvfile, delimiter=',')

            # Walk every row of the CSV.
            for row in reader:
                # For testing: only process the first few URLs.
                if eq > 5:
                    break
                eq += 1
                url = row[0]
                # Skip the header row.
                if url == 'url':
                    continue
                self.logger.info(f'从 {url} 下载图片')
                self.mkdir(url)
                self.download_image(url)
                # Throttle between pages; increase the delay if needed.
                self.logger.info(f'等待 0.1 秒, 防止被认为网络攻击')
                time.sleep(0.1)

    # Create the local destination folder for the given page URL.
    def mkdir(self, url: str):
        dest_dir = get_dir(url)
        self.logger.info(f'创建本地文件夹: {dest_dir}')
        os.makedirs(name=dest_dir, exist_ok=True)

    # Download every absolute-URL image referenced by the page at *url*.
    def download_image(self, url: str):
        try:
            # Send the GET request; timeout added so a stalled server
            # cannot hang the whole run (a Timeout is caught below).
            response = requests.get(url, timeout=30)
            dest_dir = get_dir(url)

            # Check the response status code
            if response.status_code == 200:
                # Parse the HTML content
                soup = BeautifulSoup(response.text, "html.parser")

                # Find all img tags
                img_tags = soup.find_all("img")

                index = 0

                # Download each image
                for img_tag in img_tags:
                    # .get() instead of [] so an <img> without a src
                    # attribute does not raise KeyError and abort the
                    # remaining images of this page.
                    img_url = img_tag.get("src")

                    # Skip missing/relative URLs (original behavior: only
                    # absolute http(s) image URLs are downloaded).
                    if not img_url or not img_url.startswith("http"):
                        continue

                    index += 1
                    # os.path.join for portability (identical on Windows).
                    urllib.request.urlretrieve(img_url, os.path.join(dest_dir, f'{index}.jpg'))

                    self.logger.info(f"已下载图片：{img_url}")
            else:
                self.logger.error(f'请求失败：{response.status_code}')
        except Exception as e:
            # Broad catch is deliberate: one bad page is logged and skipped
            # so the rest of the batch keeps going.
            self.logger.error(f'捕获异常：{e}')


# Script entry point: download the images for every URL listed in the CSV.
if __name__ == '__main__':
    app = Main()
    app.run(file_path='./rgfy_news.csv')
    print('finish')
