import requests
import base64
from PIL import Image
import io
import json
import time
import sys

def check_ollama_service():
    """Check whether the local Ollama daemon is reachable.

    Returns:
        bool: True if ``GET /api/tags`` answers with HTTP 200; False on any
        other status code or on a connection/timeout failure.
    """
    try:
        # A short timeout prevents the whole program from hanging forever
        # when the daemon is down or the port is filtered.
        response = requests.get("http://localhost:11434/api/tags", timeout=5)
        if response.status_code == 200:
            print("Ollama 服务正常运行")
            return True
        print(f"Ollama 服务响应异常: {response.status_code}")
        return False
    except requests.RequestException as e:
        # Narrow to network-layer errors; nothing else in the try can raise.
        print(f"Ollama 服务检查失败: {str(e)}")
        return False

def list_available_models():
    """Fetch and print the model names known to the local Ollama daemon.

    Returns:
        list[str]: model names, or an empty list when the request fails,
        the response is malformed, or no models are installed.
    """
    try:
        # Bounded wait so a dead daemon cannot stall the program.
        response = requests.get("http://localhost:11434/api/tags", timeout=5)
        if response.status_code != 200:
            print(f"获取模型列表失败: HTTP {response.status_code}")
            return []
        payload = response.json()
        # Build the name list once instead of iterating the payload twice.
        names = [model['name'] for model in payload.get("models", [])]
        print("\n可用的模型列表：")
        if not names:
            print("当前没有可用的模型")
            return []
        for name in names:
            print(f"- {name}")
        return names
    except Exception as e:
        # Deliberately broad: also covers JSON-decode and missing-key errors
        # so the caller always gets a list (best-effort contract).
        print(f"获取模型列表失败: {str(e)}")
        return []

def generate_image_with_ollama(prompt, negative_prompt="", num_inference_steps=50,
                               timeout=600):
    """Generate an image via a local Ollama model and return it as a PIL Image.

    Checks that the Ollama service is up, picks the first installed model
    from a preference list, posts a generation request, and decodes the
    base64 ``image`` field of the JSON response.

    Args:
        prompt: positive text prompt sent to the model.
        negative_prompt: text describing what to avoid in the image.
        num_inference_steps: sampling-step count forwarded to the model.
        timeout: seconds to wait for the HTTP generation call before
            aborting (new, backward-compatible; generation can take minutes,
            but an unbounded wait could hang the program indefinitely).

    Returns:
        PIL.Image.Image on success, or None on any failure (service down,
        no suitable model installed, HTTP error, or missing image data).
    """
    # Bail out early if the daemon is not reachable at all.
    if not check_ollama_service():
        print("错误：Ollama 服务未运行，请先运行 'ollama serve'")
        return None

    # Pick the first preferred model that is actually installed.
    available_models = list_available_models()
    model_names = ["llava", "sdxl", "stable-diffusion"]  # 使用更基础的模型
    selected_model = None
    for model in model_names:
        if model in available_models:
            selected_model = model
            break

    if not selected_model:
        print("\n错误：未找到可用的图像生成模型")
        print("请先运行以下命令之一：")
        print("ollama pull llava")
        print("ollama pull sdxl")
        print("ollama pull stable-diffusion")
        return None

    # Ollama generate endpoint; stream=False returns one JSON object.
    url = "http://localhost:11434/api/generate"
    data = {
        "model": selected_model,
        "prompt": prompt,
        "negative_prompt": negative_prompt,
        "num_inference_steps": num_inference_steps,
        "stream": False
    }

    try:
        print(f"\n正在使用 {selected_model} 模型生成图片，这可能需要几分钟时间...")
        print(f"提示词: {prompt}")
        print(f"负向提示词: {negative_prompt}")

        # Bounded wait: previously this call had no timeout and could
        # block forever on a wedged daemon.
        response = requests.post(url, json=data, timeout=timeout)

        if response.status_code == 200:
            result = response.json()
            if "image" in result:
                # The API returns the picture base64-encoded; decode and
                # wrap the bytes so PIL can parse them.
                image_data = base64.b64decode(result["image"])
                image = Image.open(io.BytesIO(image_data))
                return image
            print("响应中没有图片数据")
            print("响应内容:", json.dumps(result, indent=2))
            return None
        print(f"请求失败: HTTP {response.status_code}")
        print(f"请确保已经运行 'ollama pull {selected_model}' 命令")
        return None

    except Exception as e:
        # Best-effort contract: report and return None instead of raising.
        print(f"发生错误: {str(e)}")
        return None

def main():
    """Script entry point: verify the Ollama setup, then run one example generation."""
    print("Python 版本:", sys.version)
    print("开始运行图像生成程序...")

    # Guard clause: nothing to do if the daemon is not running.
    if not check_ollama_service():
        print("错误：Ollama 服务未运行")
        print("请先运行 'ollama serve' 命令启动服务")
        return

    # Guard clause: at least one model must be installed.
    if not list_available_models():
        print("未找到任何可用模型")
        print("请先运行以下命令之一：")
        print("ollama pull llava")
        print("ollama pull sdxl")
        print("ollama pull stable-diffusion")
        return

    # Example prompts for a single demonstration run.
    sample_prompt = "一只可爱的猫咪，坐在窗台上看着窗外的雨，高清摄影"
    sample_negative = "模糊的, 扭曲的, 低质量的, 不自然的"

    try:
        result = generate_image_with_ollama(sample_prompt, sample_negative)

        if result is None:
            print("图片生成失败")
            print("\n请按照以下步骤操作：")
            print("1. 确保 Ollama 服务正在运行：ollama serve")
            print("2. 拉取模型：ollama pull llava")
            print("3. 再次运行此程序")
        else:
            # Persist the generated picture next to the script.
            out_file = "ollama_generated_image.png"
            result.save(out_file)
            print(f"图片已生成并保存为: {out_file}")

    except Exception as e:
        print(f"发生错误: {str(e)}")

# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main() 