import torch
from torch.utils.data import DataLoader
from network.Network import GazeRNN
from network.loss import angular_loss
from network.input_process import GazeDataset
import numpy as np
from tqdm import tqdm
import os

def train_one_fold(train_set, test_set, device, epochs=50, batch_size=10, save_dir="checkpoints", fold_idx=0):
    """Train a GazeRNN on one LOOCV fold and return its average test loss.

    Args:
        train_set: dataset of (features, labels) pairs used for training.
        test_set: dataset of (features, labels) pairs used for evaluation.
        device: torch device the model and every batch are moved to.
        epochs: number of full passes over the training set.
        batch_size: mini-batch size for both the train and test loaders.
        save_dir: directory checkpoints are written to (created if missing).
        fold_idx: fold identifier used in progress bars and checkpoint names.

    Returns:
        Sample-weighted mean angular loss over the test set (float), or
        ``float('nan')`` if the test set is empty.
    """
    model = GazeRNN().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0002)
    # NOTE: scheduler.step() is called once per *batch* below, so the lr
    # decays by gamma=0.98 every 1000 optimizer updates, not every 1000 epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1000, gamma=0.98)
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)

    os.makedirs(save_dir, exist_ok=True)

    for epoch in range(epochs):
        model.train()
        train_iter = tqdm(train_loader, desc=f"Fold {fold_idx} Epoch {epoch+1}/{epochs} [Train]", leave=False)
        for batch_x, batch_y in train_iter:
            batch_x, batch_y = batch_x.to(device), batch_y.to(device)
            optimizer.zero_grad()
            # Task loss plus the model's own L1 penalty term.
            pred = model(batch_x)
            loss = angular_loss(pred, batch_y) + model.l1_regularization()
            loss.backward()
            optimizer.step()
            scheduler.step()
            train_iter.set_postfix(loss=loss.item())
        # Save a checkpoint after every epoch.
        torch.save(model.state_dict(), os.path.join(save_dir, f"fold{fold_idx}_epoch{epoch+1}.pt"))
        print(f"Epoch {epoch+1}/{epochs} finished. Model saved.")

    # Evaluation on the held-out user.
    model.eval()
    total_loss = 0.0
    total_samples = 0
    test_iter = tqdm(test_loader, desc=f"Fold {fold_idx} [Test]", leave=False)
    with torch.no_grad():
        for batch_x, batch_y in test_iter:
            batch_x, batch_y = batch_x.to(device), batch_y.to(device)
            pred = model(batch_x)
            loss = angular_loss(pred, batch_y)
            # Weight by batch size so a smaller final batch does not skew the
            # fold average (previously an unweighted mean of per-batch losses).
            # Assumes angular_loss returns the batch mean — TODO confirm.
            n = batch_x.size(0)
            total_loss += loss.item() * n
            total_samples += n
            test_iter.set_postfix(loss=loss.item())
    # Guard against an empty test loader (np.mean([]) would warn and give nan).
    avg_test_loss = total_loss / total_samples if total_samples else float('nan')
    return avg_test_loss

def loocv_train(data_dir, user_num=9, device='cuda'):
    """Run leave-one-user-out cross-validation over ``user_num`` users.

    Each fold holds out one user (ids 1..user_num) as the test set and
    trains on the pooled samples of all remaining users. Per-fold average
    angular losses are printed, followed by the overall mean.

    Args:
        data_dir: root directory handed to ``GazeDataset`` for each user.
        user_num: number of users; user ids run from 1 to ``user_num``.
        device: torch device string used for training (default ``'cuda'``).
    """
    results = []
    for test_user in range(1, user_num+1):
        try:
            print(f"LOOCV: Test user {test_user}")
            # Load every user's dataset. On failure we append an empty
            # placeholder so positional alignment with user ids (idx+1 below)
            # is preserved.
            train_indices = []
            test_indices = []
            user_datasets = []
            for user in tqdm(range(1, user_num+1), desc="加载用户数据"):
                try:
                    # NOTE(review): the ROI step inside GazeDataset is
                    # reportedly error-prone and may raise here.
                    dataset = GazeDataset(data_dir, user)
                    if len(dataset) == 0:
                        print(f"User {user} 数据为空，跳过。")
                    user_datasets.append(dataset)
                except Exception as e:
                    print(f"加载用户{user}数据时出错: {e}")
                    user_datasets.append([])  # placeholder keeps user indexing aligned
            # Flatten each user's samples and record its [start, end) range.
            all_features = []
            all_labels = []
            user_ranges = []
            start = 0
            for ds in user_datasets:
                try:
                    all_features.append([ds[i][0].numpy() for i in range(len(ds))])
                    all_labels.append([ds[i][1].numpy() for i in range(len(ds))])
                    user_ranges.append((start, start+len(ds)))
                    start += len(ds)
                except Exception as e:
                    print(f"处理用户数据时出错: {e}")
                    user_ranges.append((start, start))
            try:
                # Drop empty per-user lists before concatenating: previously a
                # single empty/failed user put a zero-length entry into the
                # concatenate call, which raised, was caught below, and
                # `continue`d — aborting *every* fold. Offsets in user_ranges
                # are unaffected because empty users span (s, s).
                all_features = np.concatenate([f for f in all_features if len(f) > 0], axis=0)
                all_labels = np.concatenate([l for l in all_labels if len(l) > 0], axis=0)
            except Exception as e:
                print(f"拼接特征或标签时出错: {e}")
                continue
            # Split the per-user ranges into train and test index lists.
            for idx, (s, e) in enumerate(user_ranges):
                if idx+1 == test_user:
                    test_indices.extend(range(s, e))
                else:
                    train_indices.extend(range(s, e))
            print(f"train_indices: {len(train_indices)}, test_indices: {len(test_indices)}")
            if len(train_indices) == 0 or len(test_indices) == 0:
                print(f"训练集或测试集为空，跳过本折。")
                continue
            # Wrap the selected rows as TensorDatasets.
            # NOTE(review): reshape(-1, 10, 21) hard-codes the sequence length
            # and feature width GazeRNN appears to expect — TODO confirm.
            try:
                train_set = torch.utils.data.TensorDataset(
                    torch.tensor(all_features[train_indices], dtype=torch.float32).reshape(-1, 10, 21),
                    torch.tensor(all_labels[train_indices], dtype=torch.float32)
                )
                test_set = torch.utils.data.TensorDataset(
                    torch.tensor(all_features[test_indices], dtype=torch.float32).reshape(-1, 10, 21),
                    torch.tensor(all_labels[test_indices], dtype=torch.float32)
                )
            except Exception as e:
                print(f"构建TensorDataset时出错: {e}")
                continue
            avg_test_loss = train_one_fold(train_set, test_set, device, fold_idx=test_user)
            print(f"Test user {test_user}: Avg angular loss = {avg_test_loss:.4f}")
            results.append(avg_test_loss)
        except Exception as e:
            print(f"LOOCV第{test_user}折训练或测试时出错: {e}")
    print("LOOCV finished.")
    print("All test losses:", results)
    print("Mean test loss:", np.mean(results) if results else "无有效结果")
