import requests
import base64
from PIL import Image
import io
import json
import time
import os

def check_ollama_service():
    """Check whether the local Ollama service is up and responding.

    Returns:
        bool: True if GET http://localhost:11434/api/tags answers 200,
        False on any non-200 status or request failure.
    """
    try:
        # A timeout keeps this probe from hanging forever when the port
        # is reachable but the service never answers (Bandit B113).
        response = requests.get("http://localhost:11434/api/tags", timeout=5)
        if response.status_code == 200:
            print("✓ Ollama 服务正常运行")
            return True
        else:
            print(f"✗ Ollama 服务响应异常: {response.status_code}")
            return False
    except Exception as e:
        # Best-effort probe: report the failure and signal "not running".
        print(f"✗ Ollama 服务检查失败: {str(e)}")
        return False

def list_available_models():
    """List the models the local Ollama service has pulled.

    Prints each model name and returns the names as a list.

    Returns:
        list[str]: Model names, or [] when none are available or the
        request fails.
    """
    try:
        # Timeout prevents an unresponsive service from blocking the caller.
        response = requests.get("http://localhost:11434/api/tags", timeout=5)
        if response.status_code == 200:
            # Response shape: {"models": [{"name": ...}, ...]}
            models = response.json().get("models") or []
            print("\n可用的模型列表：")
            if not models:
                print("当前没有可用的模型")
                return []
            names = [model['name'] for model in models]
            for name in names:
                print(f"- {name}")
            return names
        return []
    except Exception as e:
        print(f"获取模型列表失败: {str(e)}")
        return []

def generate_image(prompt, negative_prompt="", num_inference_steps=30):
    """Request an image for *prompt* from a locally-pulled llava model.

    Args:
        prompt: Positive text prompt describing the desired image.
        negative_prompt: Text describing what the output should avoid.
        num_inference_steps: Step count forwarded in the request payload.

    Returns:
        PIL.Image.Image decoded from the response's base64 "image" field,
        or None when the service is down, no model is available, or the
        request fails.

    NOTE(review): llava is a vision-language (image-understanding) model;
    Ollama's /api/generate normally returns text, so the "image" key may
    never be present — confirm this endpoint/model actually emits images.
    """
    # The service must be reachable before anything else.
    if not check_ollama_service():
        print("\n请先运行 'ollama serve' 启动服务")
        return None

    # Find which of the preferred llava tags is actually pulled.
    available_models = list_available_models()

    # Preference order: bare tag first, then explicit size variants.
    model_names = ["llava", "llava:13b", "llava:7b"]  # llava vision models
    selected_model = None

    for model in model_names:
        if model in available_models:
            selected_model = model
            break

    if not selected_model:
        print("\n错误：未找到可用的图像生成模型")
        print("请先运行以下命令之一：")
        print("ollama pull llava")
        print("ollama pull llava:13b")
        print("ollama pull llava:7b")
        return None

    # Ollama API endpoint
    url = "http://localhost:11434/api/generate"

    # Non-streaming request so the full response arrives in one JSON body.
    data = {
        "model": selected_model,
        "prompt": prompt,
        "negative_prompt": negative_prompt,
        "num_inference_steps": num_inference_steps,
        "stream": False
    }

    try:
        print(f"\n正在使用 {selected_model} 模型生成图片...")
        print(f"提示词: {prompt}")
        print(f"负向提示词: {negative_prompt}")

        # Generation is slow; allow a long read timeout but still bound it
        # so a wedged server cannot hang the process forever.
        response = requests.post(url, json=data, timeout=(5, 600))

        if response.status_code == 200:
            result = response.json()
            if "image" in result:
                # Decode the base64 payload into a PIL image.
                image_data = base64.b64decode(result["image"])
                image = Image.open(io.BytesIO(image_data))
                return image
            else:
                print("响应中没有图片数据")
                print("响应内容:", json.dumps(result, indent=2))
                return None
        else:
            print(f"请求失败: HTTP {response.status_code}")
            print(f"请确保已经运行 'ollama pull {selected_model}' 命令")
            return None

    except Exception as e:
        print(f"发生错误: {str(e)}")
        return None

def save_image(image, prompt, index=0):
    """Save a generated image and append its prompt to a log file.

    Args:
        image: Object with a PIL-style ``save(path)`` method.
        prompt: The text prompt used to generate the image.
        index: Sequence number embedded in the output filename.

    Returns:
        str: Path of the saved PNG under ``generated_images/``.
    """
    # Ensure the output directory exists (no-op if already present).
    output_dir = "generated_images"
    os.makedirs(output_dir, exist_ok=True)

    # Timestamped filename keeps runs from overwriting each other.
    timestamp = time.strftime("%Y%m%d_%H%M%S")
    filename = f"{output_dir}/image_{timestamp}_{index}.png"

    image.save(filename)
    # Bug fix: the original printed/logged the literal text "(unknown)"
    # instead of interpolating the actual filename.
    print(f"图片已保存: {filename}")

    # Append the prompt and filename to a per-timestamp log for traceability.
    prompt_file = f"{output_dir}/prompts_{timestamp}.txt"
    with open(prompt_file, "a", encoding="utf-8") as f:
        f.write(f"图片 {index}:\n")
        f.write(f"提示词: {prompt}\n")
        f.write(f"文件名: {filename}\n")
        f.write("-" * 50 + "\n")

    return filename

def main():
    """Entry point: verify the service, then generate and save sample images."""
    print("=== Ollama 图像生成器 ===")

    # Nothing works without a reachable Ollama daemon — bail out early.
    if not check_ollama_service():
        print("\n请先运行 'ollama serve' 启动服务")
        return

    # An empty model list means nothing has been pulled yet.
    if not list_available_models():
        print("未找到任何可用模型")
        print("请先运行以下命令之一：")
        print("ollama pull llava")
        print("ollama pull llava:13b")
        print("ollama pull llava:7b")
        return

    # Built-in demo prompts, each paired with a shared negative prompt.
    sample_prompts = [
        {
            "prompt": "一只可爱的熊猫，正在吃竹子，高清摄影，自然光线",
            "negative_prompt": "模糊的, 扭曲的, 低质量的, 不自然的, 卡通风格"
        },
        {
            "prompt": "美丽的山水风景，有瀑布和彩虹，写实风格，高清摄影",
            "negative_prompt": "模糊的, 扭曲的, 低质量的, 不自然的, 卡通风格"
        }
    ]

    try:
        for ordinal, entry in enumerate(sample_prompts, start=1):
            print(f"\n生成第 {ordinal} 张图片...")
            picture = generate_image(
                prompt=entry["prompt"],
                negative_prompt=entry["negative_prompt"]
            )

            if picture is None:
                print(f"第 {ordinal} 张图片生成失败")
            else:
                # save_image expects the zero-based index.
                save_image(picture, entry["prompt"], ordinal - 1)

        print("\n所有图片生成完成！")
        print("图片保存在 'generated_images' 目录下")

    except Exception as e:
        print(f"发生错误: {str(e)}")

# Run the generator only when executed as a script, not when imported.
if __name__ == "__main__":
    main()