import os
import json
import tensorflow as tf
from data_processor import DataProcessor
from model_builder import ModelBuilder
from model_trainer import ModelTrainer
from initialize_dataset import DatasetInitializer
from data_augmentor import DataAugmentor
from datetime import datetime
import numpy as np
import cv2
import matplotlib.pyplot as plt
from tkinter import filedialog
import tkinter as tk

# Initialize tkinter so the file/folder selection dialogs used later
# (filedialog.askopenfilename / askdirectory) have a parent window.
root = tk.Tk()
root.withdraw()  # hide the main window; only the dialogs are shown

def get_saved_models():
    """Collect every saved model file, newest first.

    Searches the ``saved_models`` directory tree (including the
    ``checkpoints`` subdirectory) for ``.keras`` and ``.h5`` files.

    Returns:
        list[dict]: one entry per model with keys ``name`` (file name),
        ``path`` (full path), ``size`` (bytes), ``modified`` (mtime),
        and ``dir`` (directory relative to ``saved_models``), sorted by
        modification time, most recent first.
    """
    models = []
    # os.walk('saved_models') already recurses into 'checkpoints', and the
    # explicit checkpoints entry below walks it a second time — dedupe by
    # normalized path so no model is listed twice.
    seen_paths = set()

    # Candidate locations; the explicit checkpoints entry keeps the search
    # working even if only that subdirectory exists.
    model_dirs = [
        'saved_models',  # base directory
        os.path.join('saved_models', 'checkpoints'),  # checkpoint directory
    ]

    for model_dir in model_dirs:
        if not os.path.exists(model_dir):
            continue

        # Recursively search all subdirectories.
        # Loop variable deliberately named `dirpath` (not `root`) to avoid
        # shadowing the module-level tkinter `root`.
        for dirpath, _, files in os.walk(model_dir):
            for file in files:
                # Support both .keras and .h5 formats.
                if file.endswith(('.keras', '.h5')):
                    model_path = os.path.join(dirpath, file)
                    normalized = os.path.normpath(model_path)
                    if normalized in seen_paths:
                        continue
                    seen_paths.add(normalized)
                    # Collect file metadata for display/sorting.
                    stats = os.stat(model_path)
                    models.append({
                        'name': file,
                        'path': model_path,
                        'size': stats.st_size,
                        'modified': stats.st_mtime,
                        'dir': os.path.relpath(dirpath, start='saved_models')
                    })

    # Most recently modified first.
    return sorted(models, key=lambda x: x['modified'], reverse=True)

def select_model():
    """List all saved models and let the user pick one interactively.

    Returns:
        str | None: the selected model's file path, or None when no
        models were found or the user entered 0 to go back.
    """
    models = get_saved_models()
    if not models:
        # NOTE: repaired a mojibake-corrupted message here — the original
        # source contained a broken UTF-8 sequence between 错误 and 有.
        print("\n错误：没有找到已保存的模型！")
        print("请检查以下目录：")
        print("- saved_models/")
        print("- saved_models/checkpoints/")
        return None

    # Show an indexed summary (name, directory, size, mtime) per model.
    print("\n可用的模型：")
    for i, model in enumerate(models, 1):
        print(f"\n{i}. {model['name']}")
        print(f"   - 目录: {model['dir']}")
        print(f"   - 大小: {model['size'] / 1024 / 1024:.1f}MB")
        print(f"   - 修改时间: {datetime.fromtimestamp(model['modified']).strftime('%Y-%m-%d %H:%M:%S')}")

    # Prompt until a valid index is entered (0 cancels).
    while True:
        try:
            choice = input("\n请选择模型编号 (0 返回): ")
            if choice == '0':
                return None

            index = int(choice) - 1
            if 0 <= index < len(models):
                selected_model = models[index]
                print(f"\n已选择模型: {selected_model['name']}")
                return selected_model['path']
            else:
                print("无效的选择，请重试")
        except ValueError:
            # Non-numeric input — re-prompt.
            print("无效的输入，请重试")

def initialize_dataset():
    """Dataset-initialization sub-menu: create/reset folders, rename
    images, or verify the dataset, looping until the user goes back."""
    print("\n=== 数据集初始化 ===")
    initializer = DatasetInitializer()

    # Map each menu key to the corresponding initializer action.
    actions = {
        '1': initializer.initialize_folders,
        '2': initializer.rename_images,
        '3': initializer.verify_dataset,
    }

    while True:
        print("\n请选择操作：")
        print("1. 创建/重置数据集文件夹")
        print("2. 重命名数据集图片")
        print("3. 验证数据集状态")
        print("4. 返回主菜单")

        choice = input("\n请输入选择 (1-4): ")

        if choice == '4':
            break

        action = actions.get(choice)
        if action is None:
            print("无效选择，请重新输入")
        else:
            action()

def train_model():
    """Interactive model-training workflow.

    Verifies the dataset, lets the user train a fresh model or resume
    an existing one (continuing from the epoch count encoded in the
    file name), optionally runs data augmentation, then trains via
    ModelTrainer.

    Returns:
        The training history object from ModelTrainer.train, or None
        when the user backs out at any menu.
    """
    print("\n=== 模型训练 ===")

    # Check dataset status before anything else.
    initializer = DatasetInitializer()
    initializer.verify_dataset()

    # Choose training mode.
    print("\n请选择训练模式：")
    print("1. 训练新模型")
    print("2. 继续训练已有模型")
    print("3. 返回主菜单")

    choice = input("\n请输入选择 (1-3): ")

    if choice == '3':
        return

    # Resuming: load the chosen model and recover its epoch count.
    model = None
    initial_epoch = 0
    if choice == '2':
        model_path = select_model()
        if model_path is None:
            return

        print(f"\n加载模型: {model_path}")
        try:
            model = tf.keras.models.load_model(model_path)
            # Extract the completed epoch count from the file name,
            # e.g. "model_epoch_12_best.keras" -> 12.
            filename = os.path.basename(model_path)
            if 'epoch_' in filename:
                try:
                    initial_epoch = int(filename.split('epoch_')[1].split('_')[0])
                    print(f"从第 {initial_epoch} 轮继续训练")
                except (ValueError, IndexError):
                    # File name doesn't follow the epoch_<n> convention;
                    # start counting from zero.
                    initial_epoch = 0
        except Exception as e:
            print(f"加载模型失败: {e}")
            return

    # Ask whether to use data augmentation.
    while True:
        print("\n请选择数据处理方式：")
        print("1. 使用原始数据集")
        print("2. 使用数据增强（推荐）")
        print("3. 返回主菜单")

        choice = input("\n请输入选择 (1-3): ")

        if choice == '1':
            dataset_path = 'dataset'
            break
        elif choice == '2':
            print("\n执行数据增强...")
            augmentor = DataAugmentor(
                target_dir='dataset/target',
                background_dir='dataset/background'
            )
            augmentor.augment_images(multiplier=3)
            dataset_path = 'dataset_augmented'
            break
        elif choice == '3':
            return
        else:
            print("无效选择，请重新输入")

    # Load and preprocess the data.
    print(f"\n使用数据集: {dataset_path}")
    processor = DataProcessor(dataset_path)
    X, y, class_names = processor.load_and_preprocess()

    # Build a fresh model when not resuming.
    if model is None:
        model_builder = ModelBuilder(input_shape=(224, 224, 3))
        model = model_builder.build_transfer_model()

    # Training hyper-parameters. Clamp the suggested epoch count to >= 1
    # so resuming at or past epoch 20 never suggests a non-positive value.
    default_epochs = max(1, 20 - initial_epoch)
    print("\n请设置训练参数：")
    try:
        epochs = int(input(f"训练轮数 (推荐: {default_epochs}): ") or str(default_epochs))
        batch_size = int(input("批次大小 (推荐: 16): ") or "16")
        validation_split = float(input("验证集比例 (推荐: 0.2): ") or "0.2")
    except ValueError:
        print("输入无效，使用默认值")
        epochs = default_epochs
        batch_size = 16
        validation_split = 0.2

    # Train. Keras interprets `epochs` as the final epoch number, so the
    # requested count is added on top of `initial_epoch`.
    trainer = ModelTrainer(model)
    history = trainer.train(
        X, y,
        validation_split=validation_split,
        batch_size=batch_size,
        epochs=epochs + initial_epoch,  # total epoch number to reach
        initial_epoch=initial_epoch     # epoch to resume from
    )

    print("\n训练完成！")
    return history

def predict_image(model, image_path):
    """Predict a single image, then save and display the annotated result.

    Args:
        model: a loaded Keras binary classifier producing one sigmoid
            score (>= 0.5 is treated as "Target").
        image_path: path to the image file to classify.

    Returns:
        tuple: (label, confidence) on success, (None, None) on failure.
    """
    try:
        # Load and preprocess: resize to the model's 224x224 input and
        # scale pixel values to [0, 1].
        img = tf.keras.preprocessing.image.load_img(
            image_path, 
            target_size=(224, 224)
        )
        img_array = tf.keras.preprocessing.image.img_to_array(img)
        img_array = tf.expand_dims(img_array, 0)
        img_array = img_array / 255.0

        # Run inference.
        prediction = model.predict(img_array)
        confidence = float(prediction[0][0])

        # Binarize the sigmoid score.
        label = "Target" if confidence >= 0.5 else "Background"

        # Render the prediction (box and/or label) onto the original image.
        result_img = draw_prediction(image_path, label, confidence)

        if result_img is not None:
            # Save the annotated image.
            output_path = os.path.join(
                'results',
                f'pred_{os.path.basename(image_path)}'
            )
            os.makedirs('results', exist_ok=True)
            cv2.imwrite(output_path, result_img)
            print(f"结果已保存: {output_path}")

            # Display the result (convert OpenCV BGR -> matplotlib RGB).
            plt.figure(figsize=(10, 10))
            plt.imshow(cv2.cvtColor(result_img, cv2.COLOR_BGR2RGB))
            plt.axis('off')
            plt.show()
            # Release the figure so repeated predictions don't leak
            # open matplotlib figures.
            plt.close()

        return label, confidence

    except Exception as e:
        # Best-effort: report and return a sentinel instead of crashing
        # the interactive menu.
        print(f"预测过程出错: {str(e)}")
        return None, None

def draw_prediction(image_path, prediction, confidence):
    """Annotate an image with the prediction result.

    For "Target" predictions, attempts to localize the object via
    Canny edges + contour analysis and draws a bounding box with a
    label strip; any other prediction just gets the label text stamped
    in the top-left corner.

    Args:
        image_path: path of the original image (read with OpenCV, BGR).
        prediction: class label string ("Target" or "Background").
        confidence: model score in [0, 1], formatted as a percentage.

    Returns:
        The annotated BGR image (numpy array), or None if the image
        could not be read.
    """
    # Read the original image.
    img = cv2.imread(image_path)
    if img is None:
        print(f"无法读取图片: {image_path}")
        return None
        
    # Image dimensions.
    height, width = img.shape[:2]
    
    if prediction == "Target":
        # Convert to grayscale for edge detection.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        
        # Gaussian blur to suppress noise before edge detection.
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        
        # Canny edge detection.
        edges = cv2.Canny(blurred, 50, 150)
        
        # Dilate edges so nearby fragments connect into closed contours.
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
        edges = cv2.dilate(edges, kernel, iterations=1)
        
        # Find outer contours only.
        contours, _ = cv2.findContours(
            edges,
            cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE
        )
        
        if contours:
            # Drop small contours (noise): keep only those covering more
            # than 1% of the image area.
            min_contour_area = (width * height) * 0.01  # minimum area threshold
            valid_contours = [c for c in contours if cv2.contourArea(c) > min_contour_area]
            
            if valid_contours:
                # Union of all valid contours' bounding rectangles.
                x_coords = []
                y_coords = []
                for contour in valid_contours:
                    x, y, w, h = cv2.boundingRect(contour)
                    x_coords.extend([x, x + w])
                    y_coords.extend([y, y + h])
                
                # Overall min/max coordinates of the union box.
                x = min(x_coords)
                y = min(y_coords)
                w = max(x_coords) - x
                h = max(y_coords) - y
                
                # Add a small margin around the box.
                padding_x = int(w * 0.05)  # 5% padding
                padding_y = int(h * 0.05)  # 5% padding
                
                # Clamp so the padded box stays inside the image.
                x = max(0, x - padding_x)
                y = max(0, y - padding_y)
                w = min(width - x, w + 2 * padding_x)
                h = min(height - y, h + 2 * padding_y)
                
                # Draw the bounding rectangle.
                color = (0, 255, 0)  # green (BGR)
                thickness = max(2, int(min(width, height) / 300))  # thin line, scaled to image size
                cv2.rectangle(img, (x, y), (x + w, y + h), color, thickness)
                
                # Label text, e.g. "Target: 97.3%".
                label = f"{prediction}: {confidence:.1%}"
                font = cv2.FONT_HERSHEY_SIMPLEX
                font_scale = min(width, height) / 1500  # small font, scaled to image size
                font_thickness = max(1, int(font_scale * 2))
                
                # Measure the text so the background strip fits it exactly.
                (text_width, text_height), baseline = cv2.getTextSize(
                    label, font, font_scale, font_thickness
                )
                
                # Filled background strip above the box for readability.
                cv2.rectangle(
                    img,
                    (x, max(0, y - text_height - 5)),
                    (x + text_width, y),
                    color,
                    -1
                )
                
                # Draw the label text over the strip.
                cv2.putText(
                    img, 
                    label,
                    (x, max(text_height, y - 2)),
                    font,
                    font_scale,
                    (0, 0, 0),  # black text
                    font_thickness
                )
                
                # Debug info.
                print(f"检测到目标区域: x={x}, y={y}, w={w}, h={h}")
    else:
        # Background images get only the label text, no box.
        label = f"{prediction}: {confidence:.1%}"
        font = cv2.FONT_HERSHEY_SIMPLEX
        font_scale = min(width, height) / 1500
        font_thickness = max(1, int(font_scale * 2))
        
        cv2.putText(
            img, 
            label,
            (10, 30),
            font,
            font_scale,
            (0, 0, 255),  # red text (BGR)
            font_thickness
        )
    
    return img

def batch_predict(model, image_dir):
    """Run the model over every image in a folder, saving annotated results.

    Args:
        model: a loaded Keras binary classifier producing one sigmoid
            score (>= 0.5 is treated as "Target").
        image_dir: directory to scan (non-recursive) for image files.

    Returns:
        list[dict] | None: per-image results with keys ``file``,
        ``prediction``, ``confidence``, ``output_path``; None when no
        image files were found.
    """
    print(f"\n开始批量预测文件夹: {image_dir}")

    # Make sure the output directory exists.
    results_dir = 'results'
    os.makedirs(results_dir, exist_ok=True)

    # Collect candidate image files (case-insensitive extension match).
    image_extensions = ('.jpg', '.jpeg', '.png', '.bmp')
    image_files = [
        f for f in os.listdir(image_dir)
        if f.lower().endswith(image_extensions)
    ]

    if not image_files:
        print("未找到图片文件")
        return

    print(f"找到 {len(image_files)} 个图片文件")
    results = []

    # Process each image independently; a failure on one image is
    # reported and skipped, not fatal to the batch.
    for i, image_file in enumerate(image_files, 1):
        image_path = os.path.join(image_dir, image_file)
        print(f"\n处理图片 {i}/{len(image_files)}: {image_file}")

        try:
            # Load and preprocess: resize to 224x224, scale to [0, 1].
            img = tf.keras.preprocessing.image.load_img(
                image_path, 
                target_size=(224, 224)
            )
            img_array = tf.keras.preprocessing.image.img_to_array(img)
            img_array = tf.expand_dims(img_array, 0)
            img_array = img_array / 255.0

            # Inference (verbose=0 keeps per-image console output clean).
            prediction = model.predict(img_array, verbose=0)
            confidence = float(prediction[0][0])

            # Binarize the sigmoid score.
            label = "Target" if confidence >= 0.5 else "Background"

            # Render the prediction onto the original image.
            result_img = draw_prediction(image_path, label, confidence)

            if result_img is not None:
                # Save the annotated image.
                output_path = os.path.join(
                    results_dir,
                    f'pred_{image_file}'
                )
                cv2.imwrite(output_path, result_img)

                # Record the outcome for the summary.
                results.append({
                    'file': image_file,
                    'prediction': label,
                    'confidence': confidence,
                    'output_path': output_path
                })

                print(f"预测: {label} (置信度: {confidence:.1%})")
                print(f"结果已保存: {output_path}")

                # Display the result (OpenCV BGR -> matplotlib RGB).
                plt.figure(figsize=(10, 10))
                plt.imshow(cv2.cvtColor(result_img, cv2.COLOR_BGR2RGB))
                plt.title(f"{label}: {confidence:.1%}")
                plt.axis('off')
                plt.show()
                # Close the figure: one figure per image otherwise
                # accumulates across the batch (matplotlib warns and
                # leaks memory past ~20 open figures).
                plt.close()

        except Exception as e:
            print(f"处理图片出错: {str(e)}")
            continue

    # Print a batch summary.
    print("\n预测完成！")
    print(f"成功处理: {len(results)}/{len(image_files)} 个图片")
    print(f"结果保存在: {results_dir}")

    return results

def test_model():
    """Interactive model-testing menu: pick a saved model, then test it
    on a single image or a whole folder via tkinter dialogs."""
    print("\n=== 模型测试 ===")

    # Pick which saved model to test.
    model_path = select_model()
    if model_path is None:
        print("未选择模型，返回主菜单")
        return

    try:
        print(f"\n加载模型: {model_path}")
        model = tf.keras.models.load_model(model_path)
        print("模型加载成功！")

        while True:
            print("\n请选择测试模式：")
            print("1. 测试单张图片")
            print("2. 批量测试文件夹")
            print("0. 返回主菜单")

            choice = input("请输入选择 (0-2): ").strip()

            if choice == '0':
                break

            if choice == '1':
                # Single image via a file-open dialog.
                picked_file = filedialog.askopenfilename(
                    title="选择图片",
                    filetypes=[
                        ("图片文件", "*.jpg *.jpeg *.png *.bmp"),
                        ("所有文件", "*.*")
                    ]
                )
                if picked_file:
                    predict_image(model, picked_file)
            elif choice == '2':
                # Whole folder via a directory dialog.
                picked_dir = filedialog.askdirectory(title="选择图片文件夹")
                if picked_dir:
                    batch_predict(model, picked_dir)
            else:
                print("无效的选择，请重试")

    except Exception as e:
        # Report any loading/testing failure and fall back to the menu.
        print(f"测试过程出错: {str(e)}")

def main():
    """Top-level menu loop for the recognition system."""
    # Menu key -> handler; '4' exits the loop directly.
    handlers = {
        '1': initialize_dataset,
        '2': train_model,
        '3': test_model,
    }

    while True:
        print("\n=== 物品识别系统 ===")
        print("1. 初始化数据集")
        print("2. 训练模型")
        print("3. 测试模型")
        print("4. 退出程序")

        choice = input("\n请输入选择 (1-4): ")

        if choice == '4':
            print("\n感谢使用！再见！")
            break

        handler = handlers.get(choice)
        if handler is None:
            print("无效选择，请重新输入")
        else:
            handler()

# Script entry point: only run the interactive menu when executed
# directly, not when imported as a module.
if __name__ == "__main__":
    main()