import subprocess

import os
import time
import traceback
import requests
from urllib.parse import urlparse
from PIL import Image
import io
from concurrent.futures import ThreadPoolExecutor, as_completed

# Text file listing the image paths to fetch, one path per line.
pic_file = "pics.txt"
# Base URL of the image server; each path from pic_file is appended to it.
base_url = "http://192.168.26.17/ai_img"
# Populated by load_img_data(); consumed by main() as the download work list.
all_imgs = []

def load_img_data():
    """Read image paths from ``pic_file`` into the module-level ``all_imgs`` list.

    Each line is stripped of surrounding whitespace (including the trailing
    newline). Every line is appended — even empty ones — to preserve the
    original behavior.
    """
    with open(pic_file, "r", encoding="utf-8") as file:
        for line in file:
            # strip() removes the trailing newline and any surrounding spaces
            all_imgs.append(line.strip())


def compress_image(image_data, quality=85):
    """Re-encode an image as JPEG at the given quality.

    Args:
        image_data (bytes): raw image bytes in any PIL-readable format.
        quality (int): JPEG quality, 1-100.

    Returns:
        tuple: (compressed bytes, original size, compressed size).
        On any decode/encode failure the original bytes are returned
        unchanged and both sizes equal the input length (best-effort).
    """
    size_before = len(image_data)
    try:
        picture = Image.open(io.BytesIO(image_data))
        # JPEG cannot store alpha or palette modes; normalize to RGB first.
        if picture.mode != 'RGB':
            picture = picture.convert('RGB')

        buffer = io.BytesIO()
        picture.save(buffer, format='JPEG', quality=quality, optimize=True)
        smaller = buffer.getvalue()
        return smaller, size_before, len(smaller)
    except Exception as e:
        # Best-effort fallback: keep the untouched bytes if compression fails.
        print(f"图片压缩失败: {e}")
        return image_data, size_before, size_before


def process_single_image(full_path, compress=True, quality=85):
    """
    Download one image from the server and optionally compress it before saving.

    Parameters:
        full_path (str): server-side path of the image, appended to ``base_url``
            to build the download URL; also mirrored locally (prefixed with ".")
            as the save location.
        compress (bool): whether to JPEG-compress the downloaded bytes.
        quality (int): compression quality (1-100), forwarded to compress_image.

    Returns:
        dict: keys 'url', 'success', 'path', 'original_size',
        'compressed_size', 'error'. Never raises: failures are reported via
        'success'=False and 'error'.
    """
    # Mirror the remote directory layout under the current directory.
    # NOTE(review): assumes full_path starts with "/" so f".{folder_path}"
    # yields "./..."; a relative path would create a hidden ".xxx" folder
    # and base_url + "/" + full_path a doubled slash — confirm input format.
    folder_path = os.path.dirname(full_path)
    os.makedirs(f".{folder_path}", exist_ok=True)

    url = f"{base_url}/{full_path}"
    result = {
        'url': url,
        'success': False,
        'path': None,
        'original_size': 0,
        'compressed_size': 0,
        'error': None
    }

    try:
        # Fetch the image over HTTP; raise_for_status turns 4xx/5xx into errors.
        response = requests.get(url, stream=True, timeout=10)
        response.raise_for_status()

        image_data = response.content
        result['original_size'] = len(image_data)

        if compress:
            image_data, original_size, compressed_size = compress_image(image_data, quality)
            result['compressed_size'] = compressed_size
            result['original_size'] = original_size

        # Derive the local file name from the URL path (ignores any query string).
        parsed_url = urlparse(url)
        filename = os.path.basename(parsed_url.path)

        # Full local save path inside the mirrored directory.
        save_path = os.path.join(f".{folder_path}", filename)

        with open(save_path, 'wb') as f:
            f.write(image_data)

        result.update({
            'success': True,
            'path': save_path
        })

    except Exception as e:
        # Swallow all errors so one bad image never aborts the batch.
        result['error'] = str(e)

    return result


def batch_download_and_compress(image_paths, compress=True, quality=85, max_workers=5):
    """
    Download (and optionally compress) many images concurrently.

    Parameters:
        image_paths (list[str]): server-side paths, fed to process_single_image.
        compress (bool): whether to JPEG-compress each image.
        quality (int): compression quality (1-100).
        max_workers (int): thread-pool size.

    Returns:
        list[dict]: one result dict per processed image (see process_single_image).
    """
    results = []
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        # Map each future back to the path it is processing, for reporting.
        future_to_url = {
            executor.submit(process_single_image, path, compress, quality): path
            for path in image_paths
        }

        for future in as_completed(future_to_url):
            url = future_to_url[future]
            try:
                result = future.result()
                results.append(result)
                if result['success']:
                    print(f"成功处理: {url}")
                    # Guard original_size > 0: a zero-byte download would raise
                    # ZeroDivisionError here, which the except below would catch
                    # and append a SECOND (failure) dict for the same URL.
                    if compress and result['original_size'] > 0:
                        reduction = (result['original_size'] - result['compressed_size']) / result[
                            'original_size'] * 100
                        print(
                            f"  大小: {result['original_size'] / 1024:.2f}KB → {result['compressed_size'] / 1024:.2f}KB (减少 {reduction:.1f}%)")
                else:
                    print(f"处理失败: {url} - 错误: {result['error']}")
            except Exception as e:
                # Failure of the worker itself (not a handled download error).
                print(f"处理 {url} 时发生异常: {e}")
                results.append({
                    'url': url,
                    'success': False,
                    'error': str(e)
                })

    # Summary: only successful results carry size fields.
    success_count = sum(1 for r in results if r['success'])
    total_original = sum(r['original_size'] for r in results if r['success'])
    total_compressed = sum(r['compressed_size'] for r in results if r['success'] and 'compressed_size' in r)

    print("\n===== 处理结果汇总 =====")
    print(f"总图片数: {len(image_paths)}")
    print(f"成功处理: {success_count}")
    print(f"失败处理: {len(image_paths) - success_count}")
    # total_original > 0 guard avoids ZeroDivisionError when every success was empty.
    if compress and success_count > 0 and total_original > 0:
        total_reduction = (total_original - total_compressed) / total_original * 100
        print(f"总大小: {total_original / 1024:.2f}KB → {total_compressed / 1024:.2f}KB (减少 {total_reduction:.1f}%)")

    return results

def main():
    """Load the image path list and batch-download everything in it.

    Any error is logged with a full traceback instead of crashing, but
    KeyboardInterrupt/SystemExit are allowed to propagate.
    """
    load_img_data()

    try:
        # compress=False: save the downloads as-is (quality is then unused).
        batch_download_and_compress(
            image_paths=all_imgs,
            compress=False,
            quality=40,
            max_workers=10
        )
    except Exception:
        # Narrowed from a bare except: so Ctrl-C still stops the script.
        err = traceback.format_exc()
        print(f"----err: {err}")





# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()

