import concurrent.futures
from io import BytesIO

import requests
import torch
from PIL import Image
from torchvision import transforms
from transformers import CLIPProcessor, CLIPModel

from emotion_clip import EmotionCLIP  # requires the emotion-clip package

# Initialize models and processors (weights are downloaded on first run;
# runs on GPU when available, otherwise CPU).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(device)
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
emotion_model = EmotionCLIP.from_pretrained("emotion-clip/ViT-B-32").to(device)

# Emotion label set scored by analyze_image (customizable / extensible).
# NOTE(review): analyze_image averages CLIP's per-label scores with
# EmotionCLIP's logits, which presumes EmotionCLIP emits its classes in
# this same order — confirm against the emotion-clip model card.
emotion_labels = [
    "amusement", "anger", "awe", "contentment", "disgust",
    "excitement", "fear", "sadness", "neutral", "surprise"
]


def download_image(url):
    """Download an image URL and return it as an RGB PIL image.

    Best-effort: any network, HTTP, or decode failure is logged and mapped
    to None so a single bad URL does not abort a whole batch.

    Args:
        url: direct URL of an image file.

    Returns:
        PIL.Image.Image in RGB mode, or None on any failure.
    """
    try:
        response = requests.get(url, timeout=10)
        # Treat HTTP errors (404, 500, ...) as failures up front instead of
        # handing an error page's HTML body to the image decoder, which
        # would fail later with a misleading message.
        response.raise_for_status()
        img = Image.open(BytesIO(response.content)).convert("RGB")
        return img
    except Exception as e:
        print(f"Error downloading {url}: {str(e)}")
        return None


def analyze_image(img):
    """Score a single image against the emotion label set.

    Fuses zero-shot CLIP image-text similarity with EmotionCLIP's
    classifier output by averaging the two softmax distributions, then
    reports the top-3 emotions.

    Args:
        img: PIL.Image in RGB mode, or None (propagated from a failed
            download).

    Returns:
        dict with keys "predicted_emotions" (top-3 labels),
        "scores" (their fused probabilities), "dominant_emotion" and
        "dominant_score"; or None when img is None.
    """
    if img is None:
        return None

    # Inference only: no_grad skips autograd bookkeeping, cutting memory
    # use (the original built graphs it never backpropagated through).
    with torch.no_grad():
        # CLIP zero-shot scoring: image similarity to each emotion label.
        inputs = clip_processor(text=emotion_labels, images=img,
                                return_tensors="pt", padding=True)
        inputs = {k: v.to(device) for k, v in inputs.items()}
        outputs = clip_model(**inputs)
        logits_per_image = outputs.logits_per_image  # image-to-text similarity scores
        probs = logits_per_image.softmax(dim=1).cpu().numpy()[0]

        # EmotionCLIP classifier head.
        # NOTE(review): the fusion below assumes EmotionCLIP's logits follow
        # the same 10-label order as `emotion_labels` — confirm with the
        # emotion-clip model card.
        emotion_inputs = emotion_model.preprocess(img).unsqueeze(0).to(device)
        emotion_outputs = emotion_model(emotion_inputs)
        emotion_probs = torch.nn.functional.softmax(emotion_outputs.logits, dim=1).cpu().numpy()[0]

    # Simple late fusion: average the two probability distributions.
    combined_probs = (probs + emotion_probs) / 2
    top_indices = combined_probs.argsort()[-3:][::-1]

    return {
        "predicted_emotions": [emotion_labels[i] for i in top_indices],
        "scores": combined_probs[top_indices].tolist(),
        "dominant_emotion": emotion_labels[top_indices[0]],
        # float() yields a plain Python number (JSON-serializable),
        # consistent with the .tolist() conversion of "scores" above —
        # the original leaked a NumPy scalar here.
        "dominant_score": float(combined_probs[top_indices[0]]),
    }


def process_url(url):
    """Fetch every image referenced by *url* and emotion-analyze each one.

    Example scaffolding: assumes the URL points to a web page whose <img>
    tags must be parsed out. A real deployment needs that page-parsing
    logic; here it is reduced to working off a list of image URLs.

    Returns:
        list of {"url": ..., "analysis": ...} dicts, one per image that
        downloaded and analyzed successfully.
    """
    image_urls = get_image_urls_from_webpage(url)  # to be implemented separately

    analyses = []
    # Downloads are I/O-bound, so fan them out across worker threads;
    # analysis itself runs in this thread as each download completes.
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as pool:
        pending = {pool.submit(download_image, image_url): image_url
                   for image_url in image_urls}
        for done in concurrent.futures.as_completed(pending):
            image = done.result()
            if image is None:
                continue
            report = analyze_image(image)
            if report:
                analyses.append({"url": pending[done], "analysis": report})
    return analyses


# Example invocation
if __name__ == "__main__":
    target_url = "https://www.baidu.com/s?wd=%E4%B8%BA%E4%BB%80%E4%B9%88%E8%B4%B7%E6%AC%BE%E9%AA%9A%E6%89%B0%E7%94%B5%E8%AF%9D%E7%AA%81%E7%84%B6%E5%8F%98%E5%B0%91%E4%BA%86&sa=fyb_n_homepage&rsv_dl=fyb_n_homepage&from=super&cl=3&tn=baidutop10&fr=top1000&rsv_idx=2&hisfilter=1"
    for item in process_url(target_url):
        analysis = item["analysis"]
        print(f"Image URL: {item['url']}")
        print(f"Dominant Emotion: {analysis['dominant_emotion']} ({analysis['dominant_score']:.2f})")
        print(f"Detailed Scores: {dict(zip(analysis['predicted_emotions'], analysis['scores']))}")
        print("-" * 50)