#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
免费商用鲜花图片爬虫
从Unsplash和Pixabay获取高质量鲜花图片
"""

import os
import requests
import json
import time
import hashlib
from urllib.parse import urlparse
import oss2
from PIL import Image
import io

class FlowerImageCrawler:
    """Crawl free commercial-use flower images from Unsplash and Pixabay,
    resize/re-encode them locally, and upload the results to Aliyun OSS.

    Credentials may be supplied via environment variables
    (UNSPLASH_ACCESS_KEY, PIXABAY_API_KEY, OSS_ACCESS_KEY_ID,
    OSS_ACCESS_KEY_SECRET); the original literal placeholders remain as
    fallbacks so existing behavior is unchanged when they are unset.
    """

    def __init__(self):
        # Unsplash API configuration (register to obtain an Access Key).
        # Prefer the environment over hard-coded secrets.
        self.unsplash_access_key = os.environ.get(
            "UNSPLASH_ACCESS_KEY", "YOUR_UNSPLASH_ACCESS_KEY")

        # Pixabay API configuration (register to obtain an API Key).
        self.pixabay_api_key = os.environ.get(
            "PIXABAY_API_KEY", "YOUR_PIXABAY_API_KEY")

        # Aliyun OSS configuration.
        self.oss_access_key_id = os.environ.get(
            "OSS_ACCESS_KEY_ID", "YOUR_OSS_ACCESS_KEY_ID")
        self.oss_access_key_secret = os.environ.get(
            "OSS_ACCESS_KEY_SECRET", "YOUR_OSS_ACCESS_KEY_SECRET")
        self.oss_endpoint = "oss-cn-shanghai.aliyuncs.com"
        self.oss_bucket_name = "flower-1"

        # Local download directory (created eagerly so later writes succeed).
        self.local_dir = "downloaded_images"
        os.makedirs(self.local_dir, exist_ok=True)

        # Initialize the OSS client; on failure the crawler degrades to
        # download-only mode (self.bucket is None and uploads are skipped).
        try:
            auth = oss2.Auth(self.oss_access_key_id, self.oss_access_key_secret)
            self.bucket = oss2.Bucket(auth, self.oss_endpoint, self.oss_bucket_name)
        except Exception as e:
            print(f"OSS初始化失败: {e}")
            self.bucket = None

    def get_unsplash_images(self, query="flowers", per_page=30, pages=3):
        """Fetch image metadata from the Unsplash search API.

        Args:
            query: Search term.
            per_page: Results requested per page.
            pages: Number of pages to fetch.

        Returns:
            A list of dicts with keys: id, url, thumb_url, description,
            alt_description, source.
        """
        images = []

        for page in range(1, pages + 1):
            url = "https://api.unsplash.com/search/photos"
            params = {
                'query': query,
                'page': page,
                'per_page': per_page,
                'orientation': 'portrait'
            }
            headers = {
                'Authorization': f'Client-ID {self.unsplash_access_key}'
            }

            try:
                # timeout prevents a stalled connection from hanging the crawl
                response = requests.get(url, params=params, headers=headers,
                                        timeout=30)
                if response.status_code == 200:
                    data = response.json()
                    for photo in data.get('results', []):
                        images.append({
                            'id': photo['id'],
                            'url': photo['urls']['regular'],
                            'thumb_url': photo['urls']['thumb'],
                            # Unsplash returns JSON null for missing text, so
                            # .get(..., '') alone can still yield None.
                            'description': photo.get('description') or '',
                            'alt_description': photo.get('alt_description') or '',
                            'source': 'unsplash'
                        })
                    print(f"Unsplash第{page}页获取成功，共{len(data.get('results', []))}张图片")
                else:
                    print(f"Unsplash API请求失败: {response.status_code}")

                time.sleep(1)  # rate-limit courtesy delay

            except Exception as e:
                print(f"获取Unsplash图片失败: {e}")

        return images

    def get_pixabay_images(self, query="flowers", per_page=20, pages=3):
        """Fetch image metadata from the Pixabay API.

        Args:
            query: Search term.
            per_page: Results requested per page.
            pages: Number of pages to fetch.

        Returns:
            A list of dicts with the same shape as get_unsplash_images().
        """
        images = []

        for page in range(1, pages + 1):
            url = "https://pixabay.com/api/"
            params = {
                'key': self.pixabay_api_key,
                'q': query,
                'image_type': 'photo',
                'orientation': 'vertical',
                'category': 'nature',
                'min_width': 400,
                'min_height': 600,
                'per_page': per_page,
                'page': page,
                'safesearch': 'true'
            }

            try:
                # timeout prevents a stalled connection from hanging the crawl
                response = requests.get(url, params=params, timeout=30)
                if response.status_code == 200:
                    data = response.json()
                    for photo in data.get('hits', []):
                        images.append({
                            'id': str(photo['id']),
                            'url': photo['webformatURL'],
                            'thumb_url': photo['previewURL'],
                            'description': photo.get('tags') or '',
                            'alt_description': photo.get('tags') or '',
                            'source': 'pixabay'
                        })
                    print(f"Pixabay第{page}页获取成功，共{len(data.get('hits', []))}张图片")
                else:
                    print(f"Pixabay API请求失败: {response.status_code}")

                time.sleep(1)  # rate-limit courtesy delay

            except Exception as e:
                print(f"获取Pixabay图片失败: {e}")

        return images

    def download_image(self, image_info):
        """Download one image, normalize it, and save it locally as JPEG.

        Args:
            image_info: Metadata dict produced by one of the fetchers
                (must contain 'url', 'id', 'source').

        Returns:
            A dict describing the saved file (filename, local_path, size,
            description, source), or None on any failure.
        """
        try:
            response = requests.get(image_info['url'], timeout=30)
            if response.status_code != 200:
                # Explicit failure path instead of silently falling through.
                print(f"下载图片失败 {image_info['url']}: HTTP {response.status_code}")
                return None

            # Stable, collision-resistant filename: source + id + URL hash.
            url_hash = hashlib.md5(image_info['url'].encode()).hexdigest()[:8]
            filename = f"{image_info['source']}_{image_info['id']}_{url_hash}.jpg"

            # Decode the image from the in-memory response body.
            img = Image.open(io.BytesIO(response.content))

            # JPEG cannot store alpha/palette; convert to RGB first.
            if img.mode in ('RGBA', 'LA', 'P'):
                img = img.convert('RGB')

            # Downscale in place, preserving aspect ratio.
            img.thumbnail((800, 1200), Image.Resampling.LANCZOS)

            local_path = os.path.join(self.local_dir, filename)
            img.save(local_path, 'JPEG', quality=85)

            return {
                'filename': filename,
                'local_path': local_path,
                'size': os.path.getsize(local_path),
                'description': image_info.get('description', ''),
                'source': image_info['source']
            }

        except Exception as e:
            print(f"下载图片失败 {image_info['url']}: {e}")
            return None

    def upload_to_oss(self, local_path, oss_key):
        """Upload a local file to Aliyun OSS.

        Args:
            local_path: Path of the file on disk.
            oss_key: Destination object key inside the bucket.

        Returns:
            The public OSS URL on success, otherwise None.
        """
        if not self.bucket:
            print("OSS未初始化，跳过上传")
            return None

        try:
            result = self.bucket.put_object_from_file(oss_key, local_path)
            if result.status == 200:
                oss_url = f"https://{self.oss_bucket_name}.{self.oss_endpoint}/{oss_key}"
                print(f"上传成功: {oss_url}")
                return oss_url
            else:
                print(f"上传失败: {result.status}")
                return None
        except Exception as e:
            print(f"OSS上传失败: {e}")
            return None

    def crawl_and_upload(self, categories=None):
        """Crawl each category from both sources, download up to 20 images
        per category, upload them to OSS, and persist a JSON manifest.

        Args:
            categories: Iterable of search terms; a default flower list is
                used when None.

        Returns:
            A list of result dicts (category, filename, oss_url, local_path,
            description, source) for every successfully uploaded image.
        """
        if categories is None:
            categories = [
                "roses", "tulips", "sunflowers", "lilies",
                "orchids", "daisies", "carnations", "peonies"
            ]

        all_results = []

        for category in categories:
            print(f"\n开始爬取 {category} 类别的图片...")

            # Merge results from both platforms for this category.
            unsplash_images = self.get_unsplash_images(category, per_page=15, pages=2)
            pixabay_images = self.get_pixabay_images(category, per_page=15, pages=2)

            all_images = unsplash_images + pixabay_images
            print(f"{category} 类别共获取 {len(all_images)} 张图片")

            category_results = []

            # Hard cap of 20 images per category to bound storage/time.
            for i, image_info in enumerate(all_images[:20]):
                print(f"处理第 {i+1}/{min(20, len(all_images))} 张图片...")

                download_result = self.download_image(image_info)
                if download_result:
                    # Mirror the category structure inside the bucket.
                    oss_key = f"flowers/{category}/{download_result['filename']}"
                    oss_url = self.upload_to_oss(download_result['local_path'], oss_key)

                    if oss_url:
                        category_results.append({
                            'category': category,
                            'filename': download_result['filename'],
                            'oss_url': oss_url,
                            'local_path': download_result['local_path'],
                            'description': download_result['description'],
                            'source': download_result['source']
                        })

                time.sleep(0.5)  # throttle downloads

            all_results.extend(category_results)
            print(f"{category} 类别完成，成功处理 {len(category_results)} 张图片")

        # Persist the manifest for downstream consumers.
        with open('flower_images_results.json', 'w', encoding='utf-8') as f:
            json.dump(all_results, f, ensure_ascii=False, indent=2)

        print(f"\n爬取完成！总共处理 {len(all_results)} 张图片")
        print("结果已保存到 flower_images_results.json")

        return all_results

def main():
    """Entry point: build the category list, run the crawler, print stats."""
    print("=== 鲜花图片爬虫工具 ===")
    print("注意：使用前请先配置API密钥和OSS信息")

    crawler = FlowerImageCrawler()

    # Flower categories to crawl.
    flower_categories = [
        "red roses", "white roses", "pink roses",
        "tulips", "sunflowers", "lilies",
        "orchids", "daisies", "carnations",
        "peonies", "hydrangeas", "chrysanthemums"
    ]

    # Kick off the crawl/upload pipeline.
    results = crawler.crawl_and_upload(flower_categories)

    # Per-category summary of successfully processed images.
    print("\n=== 爬取统计 ===")
    for cat in {item['category'] for item in results}:
        total = sum(1 for item in results if item['category'] == cat)
        print(f"{cat}: {total} 张图片")

# Run the crawler only when this file is executed as a script.
if __name__ == "__main__":
    main()