import os
import io
import base64
import uuid
import torch
import numpy as np
from PIL import Image
from flask import Flask, request, jsonify
from flask_cors import CORS
import sys

# Add the project root directory to sys.path so sibling packages import cleanly
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))

from models.colorization_model import ColorizationGenerator
from models.model_manager import ModelManager
from utils.data_utils import prepare_grayscale_image, lab_to_rgb, prepare_and_restore_grayscale_image, lab_to_rgb_with_resize

app = Flask(__name__)
CORS(app)  # enable cross-origin requests so a separately-hosted frontend can call this API

# Configuration: uploads, results and model weights live one directory above this file
UPLOAD_FOLDER = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'uploads')
RESULT_FOLDER = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'results')
MODEL_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models/weights/flower.pth')
IMG_SIZE = 256  # square input resolution fed to the colorization model

# Create the working directories if they do not exist yet
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
os.makedirs(RESULT_FOLDER, exist_ok=True)

# Global state, populated by load_model() / initialize_model_manager()
model = None          # currently loaded colorization generator
device = None         # torch device chosen in load_model(); None until then
model_manager = None  # lazily created ModelManager singleton

def initialize_model_manager():
    """Lazily create and return the module-level ModelManager singleton."""
    global model_manager
    if model_manager is not None:
        return model_manager
    model_manager = ModelManager()
    return model_manager

def load_model():
    """Select a torch device, load the classifier and a default generator.

    Populates the module-level `device`, `model_manager` and `model`
    globals; returns the default ('flower') colorization model.
    """
    global model, device, model_manager

    # Prefer GPU when available, otherwise fall back to CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Ensure the shared ModelManager exists, then warm up the classifier
    # and a default colorization model so the first request is fast.
    model_manager = initialize_model_manager()
    model_manager.load_classifier()
    model = model_manager.load_model('flower')
    return model

@app.route('/health', methods=['GET'])
def health_check():
    """Liveness probe: report that the service is up."""
    return jsonify(status='ok')

@app.route('/colorize', methods=['POST'])
def colorize():
    """Colorize a single uploaded grayscale image.

    Form fields:
        image: uploaded image file (required).
        mode: 'auto' to pick a model via the classifier, anything else
              to use `model_type` directly (default 'auto').
        model_type: model key used when mode != 'auto' (default 'flower').

    Returns JSON {'success', 'id', 'image' (base64 JPEG), 'image_class'};
    400 for a missing file, 500 on processing errors.
    """
    # Validate the request
    if 'image' not in request.files:
        return jsonify({'error': 'No image provided'}), 400

    file = request.files['image']
    if file.filename == '':
        return jsonify({'error': 'No image selected'}), 400

    # Mode parameters (automatic model selection vs. explicit model)
    mode = request.form.get('mode', 'auto')
    model_type = request.form.get('model_type', 'flower')

    try:
        # Unique ID ties the upload to its result file
        unique_id = str(uuid.uuid4())

        # Sanitize the client-supplied name (basename strips any path
        # components) and guarantee an extension so PIL can infer the
        # output format when saving the result.
        safe_name = os.path.basename(file.filename) or 'image'
        if not os.path.splitext(safe_name)[1]:
            safe_name += '.jpg'
        input_path = os.path.join(UPLOAD_FOLDER, f"{unique_id}_{safe_name}")
        file.save(input_path)

        # L channel resized for the model, plus original size/image so the
        # result can be restored to full resolution.
        L, original_size, original_image = prepare_and_restore_grayscale_image(input_path, model_img_size=IMG_SIZE)

        manager = initialize_model_manager()

        # Choose the colorization model according to the requested mode
        if mode == 'auto':
            colorization_model, image_class = manager.classify_and_get_model(original_image)
            print(f"图像分类结果: {image_class}，使用对应的模型")
        else:
            colorization_model = manager.load_model(model_type)
            image_class = model_type
            print(f"使用指定的模型: {model_type}")

        # `device` is only set by load_model(); fall back when the app is
        # served without going through __main__ (e.g. under a WSGI server).
        target_device = device if device is not None else torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        L = L.to(target_device)

        # Inference
        with torch.no_grad():
            ab_pred = colorization_model(L)

        # Convert Lab prediction to RGB at the original resolution
        rgb_image = lab_to_rgb_with_resize(L.cpu(), ab_pred.cpu(), original_size)[0]

        # Persist the result image
        output_image = Image.fromarray((rgb_image * 255).astype(np.uint8))
        result_path = os.path.join(RESULT_FOLDER, f"{unique_id}_{safe_name}")
        output_image.save(result_path)

        # Encode as base64 JPEG for the JSON response
        buffered = io.BytesIO()
        output_image.save(buffered, format="JPEG")
        img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')

        return jsonify({
            'success': True,
            'id': unique_id,
            'image': img_str,
            'image_class': image_class
        })

    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/colorize_batch', methods=['POST'])
def colorize_batch():
    """Colorize several uploaded images in one request.

    Form fields:
        images: one or more uploaded image files (required).
        mode: 'auto' to classify each image and pick its model, anything
              else to use `model_type` for all images (default 'auto').
        model_type: model key used when mode != 'auto' (default 'flower').

    Returns JSON {'success', 'results': [{'id', 'filename', 'image',
    'image_class'}, ...]}; 400 for missing files, 500 on processing errors.
    """
    # Validate the request
    if 'images' not in request.files:
        return jsonify({'error': 'No images provided'}), 400

    files = request.files.getlist('images')
    if len(files) == 0:
        return jsonify({'error': 'No images selected'}), 400

    # Mode parameters (automatic model selection vs. explicit model)
    mode = request.form.get('mode', 'auto')
    model_type = request.form.get('model_type', 'flower')

    try:
        results = []
        manager = initialize_model_manager()

        for file in files:
            unique_id = str(uuid.uuid4())

            # Keep the raw client filename for the response, but sanitize a
            # copy for filesystem use and guarantee an extension so PIL can
            # infer the save format.
            filename = file.filename
            safe_name = os.path.basename(filename) or 'image'
            if not os.path.splitext(safe_name)[1]:
                safe_name += '.jpg'
            input_path = os.path.join(UPLOAD_FOLDER, f"{unique_id}_{safe_name}")
            file.save(input_path)

            # L channel resized for the model, plus original size/image so
            # the result can be restored to full resolution.
            L, original_size, original_image = prepare_and_restore_grayscale_image(input_path, model_img_size=IMG_SIZE)

            # Choose the colorization model according to the requested mode
            if mode == 'auto':
                colorization_model, image_class = manager.classify_and_get_model(original_image)
                print(f"图像 {filename} 分类结果: {image_class}，使用对应的模型")
            else:
                colorization_model = manager.load_model(model_type)
                image_class = model_type
                print(f"使用指定的模型: {model_type}")

            # `device` is only set by load_model(); fall back when the app is
            # served without going through __main__ (e.g. under WSGI).
            target_device = device if device is not None else torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            L = L.to(target_device)

            # Inference
            with torch.no_grad():
                ab_pred = colorization_model(L)

            # Convert Lab prediction to RGB at the original resolution
            rgb_image = lab_to_rgb_with_resize(L.cpu(), ab_pred.cpu(), original_size)[0]

            # Persist the result image
            output_image = Image.fromarray((rgb_image * 255).astype(np.uint8))
            result_path = os.path.join(RESULT_FOLDER, f"{unique_id}_{safe_name}")
            output_image.save(result_path)

            # Encode as base64 JPEG for the JSON response
            buffered = io.BytesIO()
            output_image.save(buffered, format="JPEG")
            img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')

            results.append({
                'id': unique_id,
                'filename': filename,
                'image': img_str,
                'image_class': image_class
            })

        return jsonify({
            'success': True,
            'results': results
        })

    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/models', methods=['GET'])
def list_models():
    """Enumerate colorization weight files available on disk."""
    weights_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models/weights')

    if not os.path.exists(weights_dir):
        return jsonify({'models': []})

    # Accept both the generator_* naming scheme and the known legacy names
    legacy_names = {'celeba_net_G.pth', 'flower_net_G.pth', 'building_net_G.pth'}
    found = [
        name for name in os.listdir(weights_dir)
        if (name.startswith('generator_') and name.endswith('.pth')) or name in legacy_names
    ]

    return jsonify({'models': found})

@app.route('/set_model', methods=['POST'])
def set_model():
    """Switch the globally active colorization model.

    JSON body: {"model_type": <key understood by ModelManager.load_model>}.
    Returns 400 when the body is missing, not JSON, or lacks model_type;
    500 when loading the model fails.
    """
    global model, model_manager

    # get_json(silent=True) yields None instead of raising when the body is
    # absent or not valid JSON, so malformed requests get a clean 400
    # (request.json would blow up before the key check).
    data = request.get_json(silent=True)
    if not data or 'model_type' not in data:
        return jsonify({'error': 'No model type provided'}), 400

    model_type = data['model_type']

    # Ensure the shared ModelManager exists
    model_manager = initialize_model_manager()

    try:
        # Assign the module-level `model` so the switch actually persists;
        # the original assignment was to a local and had no effect.
        model = model_manager.load_model(model_type)
        return jsonify({'success': True, 'model_type': model_type})
    except Exception as e:
        return jsonify({'error': str(e)}), 500

if __name__ == '__main__':
    # Warm up the classifier and default model before accepting traffic
    load_model()

    # Serve on all interfaces; port is overridable via the PORT env var
    app.run(host='0.0.0.0', port=int(os.environ.get('PORT', 8000)))