#!/usr/bin/env python
# -*- coding: UTF-8 -*-

import os
import sys
import requests
from io import BytesIO
from PIL import Image
import time
import random
import json

# 添加项目路径到系统路径
project_path = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(project_path)

# 导入MinIO客户端
from backend.tools.minio_client import minio_client

class AuthenticImageCrawler:
    """Crawler that fetches real photographs from public image services,
    resizes them, and uploads them to MinIO object storage.

    The set of images to fetch is driven by ``self.image_requirements``:
    a mapping of category -> list of specs, each spec giving the target
    object name, a search keyword, and the maximum width/height.
    """

    def __init__(self):
        """Initialize request headers, candidate sources, and the image catalog."""
        # Browser-like User-Agent so the image hosts do not reject the request.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

        # Candidate image-search endpoints (informational only; the crawl
        # methods below build their own URLs and do not read this list).
        self.image_sources = [
            "https://api.unsplash.com/photos/random",
            "https://api.pexels.com/v1/search"
        ]

        # Images the project needs, grouped by category.
        # Each entry: object name, search keyword, and max width/height.
        self.image_requirements = {
            'banners': [
                {"name": "banner1", "keyword": "massage therapy", "width": 600, "height": 300},
                {"name": "banner2", "keyword": "spa treatment", "width": 600, "height": 300}
            ],
            'icons': [
                {"name": "massage", "keyword": "massage icon", "width": 100, "height": 100},
                {"name": "spa", "keyword": "spa icon", "width": 100, "height": 100},
                {"name": "footcare", "keyword": "foot massage", "width": 100, "height": 100},
                {"name": "bodycare", "keyword": "body care", "width": 100, "height": 100}
            ],
            'avatars': [
                {"name": "technician1", "keyword": "massage therapist", "width": 200, "height": 200},
                {"name": "technician2", "keyword": "spa therapist", "width": 200, "height": 200},
                {"name": "technician3", "keyword": "beauty therapist", "width": 200, "height": 200}
            ],
            'services': [
                {"name": "service1", "keyword": "full body massage", "width": 200, "height": 200},
                {"name": "service2", "keyword": "foot massage", "width": 200, "height": 200}
            ]
        }

    def crawl_from_unsplash(self, keyword, count=1):
        """Fetch an image URL from Unsplash's public random-image endpoint.

        :param keyword: search keyword appended to the featured-image URL
        :param count: kept for interface symmetry; at most one URL is returned
        :return: list with one resolved image URL, or [] on any failure
        """
        try:
            # NOTE: for production use, register an Unsplash API key; this
            # uses the public source.unsplash.com redirect as a stand-in.
            url = f"https://source.unsplash.com/featured/?{keyword}"
            response = requests.get(url, headers=self.headers, timeout=10)
            if response.status_code == 200:
                content_type = response.headers.get('content-type', '')
                # Only trust the result if the server actually served an image.
                if 'image' in content_type:
                    # response.url is the final URL after redirects.
                    return [response.url]
        except Exception as e:
            print(f"从Unsplash爬取图片失败: {e}")
        return []

    def crawl_from_pexels(self, keyword, count=1):
        """Fetch an image URL from Pexels.

        NOTE(review): ``keyword`` and ``count`` are currently ignored — the
        URL only carries a random cache-buster, so results are not keyword
        specific.

        :param keyword: unused (kept for a uniform source interface)
        :param count: unused (kept for a uniform source interface)
        :return: list with one image URL, or [] on any failure
        """
        try:
            url = f"https://images.pexels.com/photos/random/?random={random.randint(1, 1000)}"
            response = requests.get(url, headers=self.headers, timeout=10)
            if response.status_code == 200:
                content_type = response.headers.get('content-type', '')
                if 'image' in content_type:
                    return [response.url]
        except Exception as e:
            print(f"从Pexels爬取图片失败: {e}")
        return []

    def crawl_from_pixabay(self, keyword, count=1):
        """Fetch an image URL from Pixabay.

        NOTE(review): the URL below is a single hard-coded CDN asset;
        ``keyword`` and ``count`` are ignored, so every call targets the
        same image.

        :param keyword: unused (kept for a uniform source interface)
        :param count: unused (kept for a uniform source interface)
        :return: list with one image URL, or [] on any failure
        """
        try:
            url = f"https://pixabay.com/get/57e8d5444a5bae14f6da8c7dda793f7f1036dfe35254774974297ed09e4dc45f_640.jpg"
            response = requests.get(url, headers=self.headers, timeout=10)
            if response.status_code == 200:
                content_type = response.headers.get('content-type', '')
                if 'image' in content_type:
                    return [response.url]
        except Exception as e:
            print(f"从Pixabay爬取图片失败: {e}")
        return []

    def crawl_authentic_image(self, keyword):
        """Try each image source in turn and download the first real image.

        :param keyword: search keyword forwarded to each source
        :return: raw image bytes, or None if every source failed
        """
        print(f"正在搜索真实图片: {keyword}")

        # Sources are tried in priority order; the first image wins.
        sources = [
            lambda k: self.crawl_from_unsplash(k),
            lambda k: self.crawl_from_pexels(k),
            lambda k: self.crawl_from_pixabay(k)
        ]

        for source_func in sources:
            try:
                urls = source_func(keyword)
                if urls:
                    for url in urls:
                        print(f"尝试从 {url} 获取图片...")
                        response = requests.get(url, headers=self.headers, timeout=10)
                        if response.status_code == 200:
                            content_type = response.headers.get('content-type', '')
                            if 'image' in content_type:
                                print(f"成功获取真实图片: {keyword}")
                                return response.content
                        # Random delay between attempts to avoid being blocked.
                        time.sleep(random.uniform(1, 2))
            except Exception as e:
                print(f"从图片源获取图片失败: {e}")
                continue

        print(f"无法获取真实图片: {keyword}")
        return None

    def resize_image(self, image_data, max_width, max_height):
        """Downscale an image to fit within the given bounds and re-encode as JPEG.

        :param image_data: raw image bytes
        :param max_width: maximum width in pixels
        :param max_height: maximum height in pixels
        :return: JPEG-encoded bytes; on any processing error the original
                 bytes are returned unchanged (best-effort behavior)
        """
        try:
            image = Image.open(BytesIO(image_data))

            # Shrink only when the image exceeds the bounds; thumbnail()
            # preserves aspect ratio and never upscales.
            if image.width > max_width or image.height > max_height:
                image.thumbnail((max_width, max_height), Image.Resampling.LANCZOS)

            # JPEG has no alpha channel: flatten transparent/palette images
            # onto a white background before saving.
            if image.mode in ('RGBA', 'LA', 'P'):
                background = Image.new('RGB', image.size, (255, 255, 255))
                if image.mode == 'P':
                    image = image.convert('RGBA')
                # Use the alpha channel as the paste mask when present.
                background.paste(image, mask=image.split()[-1] if image.mode in ('RGBA', 'LA') else None)
                image = background

            output = BytesIO()
            image.save(output, format='JPEG', quality=85)
            return output.getvalue()
        except Exception as e:
            print(f"调整图片大小失败: {e}")
            return image_data  # fall back to the unprocessed bytes

    def upload_to_minio(self, image_data, folder, filename):
        """Upload image bytes to MinIO and return a presigned download URL.

        :param image_data: JPEG image bytes
        :param folder: bucket sub-folder (category name)
        :param filename: object file name within the folder
        :return: presigned URL string, or None on failure
        """
        try:
            # BUG FIX: previously the object name was the literal
            # "{folder}/(unknown)", so every image in a folder overwrote
            # the same object. Use the caller-supplied filename instead.
            object_name = f"{folder}/{filename}"
            minio_client.upload_bytes(object_name, image_data, content_type="image/jpeg")
            url = minio_client.presigned_get_url(object_name)
            return url
        except Exception as e:
            print(f"上传图片到MinIO失败: {e}")
            return None

    def crawl_and_upload_all_images(self):
        """Crawl, resize, and upload every image in ``image_requirements``.

        :return: dict mapping category -> {image name -> presigned URL}
                 (entries that failed to crawl or upload are omitted)
        """
        print("开始爬取并上传所有真实图片...")

        image_urls = {}

        for category, images in self.image_requirements.items():
            print(f"\n=== 爬取 {category} 类别图片 ===")
            category_urls = {}

            for img_info in images:
                name = img_info["name"]
                keyword = img_info["keyword"]
                width = img_info["width"]
                height = img_info["height"]

                print(f"\n--- 爬取 {name} ({keyword}) ---")

                image_data = self.crawl_authentic_image(keyword)
                if image_data:
                    resized_data = self.resize_image(image_data, width, height)
                    filename = f"{name}.jpg"
                    url = self.upload_to_minio(resized_data, category, filename)
                    if url:
                        category_urls[name] = url
                        print(f"{name} 上传成功: {url}")
                    else:
                        print(f"{name} 上传失败")
                else:
                    print(f"{name} 爬取失败")

                # Random pause between images to stay under rate limits.
                time.sleep(random.uniform(1, 3))

            image_urls[category] = category_urls

        print("\n所有真实图片爬取和上传完成!")
        return image_urls

def main():
    """Entry point: crawl and upload all project images, then print a summary."""
    crawler = AuthenticImageCrawler()
    all_urls = crawler.crawl_and_upload_all_images()

    # Print a per-category summary of every uploaded image URL.
    print("\n=== 真实图片URL汇总 ===")
    for category in all_urls:
        print(f"\n{category}:")
        for name, url in all_urls[category].items():
            print(f"  {name}: {url}")


if __name__ == "__main__":
    main()