import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, WeightedRandomSampler
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_fscore_support, accuracy_score
import os
import time
import psutil

# Output directories for checkpoints, metric reports, and figures.
model_dir = 'models/AdaBoost'
result_dir = 'results/AdaBoost'
viz_dir = 'visualization/AdaBoost'
for _dir in (model_dir, result_dir, viz_dir):
    os.makedirs(_dir, exist_ok=True)

# Hyperparameters.
n_estimators = 20       # boosting rounds
input_dim = 784         # 28*28 flattened MNIST image
hidden_dim = 128
output_dim = 10
batch_size = 256
epochs_per_model = 3    # epochs used to train each weak learner
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Standard MNIST mean/std normalization.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])

# Datasets and the fixed test loader (the training loader is rebuilt every
# boosting round with fresh sample weights, so none is created here).
train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root='./data', train=False, transform=transform)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)


# Weak learner: a one-hidden-layer MLP over flattened images.
class WeakClassifier(nn.Module):
    """Single-hidden-layer perceptron used as the boosted weak learner."""

    def __init__(self):
        super().__init__()
        # Kept as an nn.Sequential stored under `layer` so the saved
        # state_dict keys ('layer.0.*', 'layer.2.*') remain stable.
        self.layer = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, output_dim),
        )

    def forward(self, x):
        """Flatten to (batch, input_dim) and return raw class logits."""
        flat = x.view(x.size(0), -1)
        return self.layer(flat)


# AdaBoost over neural weak learners. NOTE(review): this applies the classic
# binary AdaBoost formulas (reject at epsilon >= 0.5, alpha = 0.5*log((1-e)/e))
# to a 10-class task; the multiclass SAMME variant would reject at
# 1 - 1/K and add log(K-1) to alpha — confirm the binary-style update is
# intentional.
class AdaBoost:
    """Boosted ensemble of WeakClassifier MLPs trained by weighted resampling.

    Attributes:
        models: weak learners kept so far (rounds with epsilon >= 0.5 or
            epsilon == 0 are skipped and contribute nothing).
        alphas: vote weight alpha_t matching each entry of ``models``.
        metrics: per-round bookkeeping; 'train_loss'/'train_acc' grow by
            epochs_per_model entries every round, while the remaining lists
            grow by one entry per *kept* round only, so the lists are not
            index-aligned with the round number.
    """

    def __init__(self):
        self.models = []
        self.alphas = []
        self.metrics = {
            'train_loss': [], 'train_acc': [],
            'val_acc': [], 'val_loss': [],
            'misclassification': [], 'times': [],
            'memory': [], 'gpu_memory': []
        }

    def fit(self, train_dataset, val_loader):
        """Run up to ``n_estimators`` boosting rounds and return self.

        Args:
            train_dataset: indexable dataset of (image, label) pairs; its
                index order must stay fixed, since ``sample_weights[i]``
                refers to sample i.
            val_loader: DataLoader used for per-round ensemble evaluation.
        """
        # Start from a uniform distribution over the training samples.
        sample_weights = torch.ones(len(train_dataset)) / len(train_dataset)
        sample_weights = sample_weights.to(device)

        for t in range(n_estimators):
            start_time = time.time()

            # Resample (with replacement) according to the current boosting
            # weights; WeightedRandomSampler requires CPU weights.
            sampler = WeightedRandomSampler(sample_weights.cpu(), len(train_dataset), replacement=True)
            train_loader = DataLoader(train_dataset, batch_size=batch_size, sampler=sampler)

            # Train a fresh weak classifier on the reweighted sample.
            model = WeakClassifier().to(device)
            optimizer = optim.Adam(model.parameters())
            criterion = nn.CrossEntropyLoss()

            model.train()
            epoch_loss, epoch_acc = [], []
            for epoch in range(epochs_per_model):
                total_loss, correct = 0, 0
                for data, target in train_loader:
                    data, target = data.to(device), target.to(device)
                    optimizer.zero_grad()
                    output = model(data)
                    loss = criterion(output, target)
                    loss.backward()
                    optimizer.step()

                    total_loss += loss.item() * data.size(0)
                    pred = output.argmax(dim=1)
                    correct += pred.eq(target).sum().item()

                epoch_loss.append(total_loss / len(train_dataset))
                epoch_acc.append(correct / len(train_dataset))
                print(f'Estimator {t + 1} Epoch {epoch + 1}: Loss: {epoch_loss[-1]:.4f}, Acc: {epoch_acc[-1]:.4f}')

            self.metrics['train_loss'].extend(epoch_loss)
            self.metrics['train_acc'].extend(epoch_acc)

            # Weighted training error: predictions are gathered over the
            # dataset in its original index order (plain DataLoader, no
            # sampler) so they align with sample_weights.
            model.eval()
            all_preds, all_targets = [], []
            with torch.no_grad():
                for data, target in DataLoader(train_dataset, batch_size=batch_size):
                    data, target = data.to(device), target.to(device)
                    output = model(data)
                    all_preds.append(output.argmax(dim=1).cpu())
                    all_targets.append(target.cpu())

            all_preds = torch.cat(all_preds)
            all_targets = torch.cat(all_targets)
            incorrect = (all_preds != all_targets).float()
            epsilon_t = (sample_weights.cpu() * incorrect).sum().item()

            # Discard learners no better than chance (binary criterion) and
            # perfect learners (alpha would diverge); weights stay untouched.
            if epsilon_t >= 0.5 or epsilon_t == 0:
                print(f"Estimator {t + 1} skipped (ε={epsilon_t:.4f})")
                continue

            alpha_t = 0.5 * np.log((1 - epsilon_t) / epsilon_t)

            # Reweight: misclassified samples are scaled by exp(+alpha),
            # correct ones by exp(-alpha), then renormalize to sum to 1.
            sample_weights *= torch.exp(alpha_t * (1 - 2 * incorrect.to(device)))
            sample_weights /= sample_weights.sum()

            # Keep the learner and its vote weight.
            self.models.append(model)
            self.alphas.append(alpha_t)

            # Ensemble (not single-learner) performance on the validation set.
            val_acc, val_loss = self.evaluate(val_loader)
            self.metrics['val_acc'].append(val_acc)
            self.metrics['val_loss'].append(val_loss)
            self.metrics['misclassification'].append(1 - val_acc)

            # Resource accounting for this round.
            self.metrics['times'].append(time.time() - start_time)
            self.metrics['memory'].append(psutil.Process().memory_info().rss / 1024 ** 2)
            if device.type == 'cuda':
                torch.cuda.synchronize()
                # Cumulative peak since process start, not a per-round peak.
                self.metrics['gpu_memory'].append(torch.cuda.max_memory_allocated() / 1024 ** 2)
            else:
                self.metrics['gpu_memory'].append(0)

            print(f'Estimator {t + 1} trained | Val Acc: {val_acc:.4f} | Time: {self.metrics["times"][-1]:.2f}s')

        return self

    def evaluate(self, data_loader):
        """Return (accuracy, mean loss) of the current ensemble on data_loader.

        NOTE(review): CrossEntropyLoss is applied to predict()'s weighted sum
        of softmax probabilities, not to raw logits, so the reported loss is
        a monotone proxy rather than a true cross-entropy — confirm intended.
        """
        criterion = nn.CrossEntropyLoss()
        total_loss, correct = 0, 0
        with torch.no_grad():
            for data, target in data_loader:
                data, target = data.to(device), target.to(device)
                output = self.predict(data)
                loss = criterion(output, target)
                total_loss += loss.item() * data.size(0)
                correct += output.argmax(dim=1).eq(target).sum().item()
        return correct / len(data_loader.dataset), total_loss / len(data_loader.dataset)

    def predict(self, x):
        """Alpha-weighted sum of per-model softmax scores, shape (batch, output_dim).

        Returns all zeros when no learner has been kept yet.
        """
        preds = torch.zeros((x.size(0), output_dim)).to(device)
        for alpha, model in zip(self.alphas, self.models):
            output = model(x)
            preds += alpha * torch.softmax(output, dim=1)
        return preds


# Train the ensemble (the test set doubles as the validation set here);
# fit() returns self, so construction and training chain.
adaboost = AdaBoost().fit(train_dataset, test_loader)

# Persist the kept weak learners and their vote weights.
checkpoint = {
    'models': [m.state_dict() for m in adaboost.models],
    'alphas': adaboost.alphas,
}
torch.save(checkpoint, os.path.join(model_dir, 'adaboost.pth'))

# Test-set evaluation of the full ensemble.
y_true, y_pred = [], []
with torch.no_grad():
    for data, target in test_loader:
        data = data.to(device)
        output = adaboost.predict(data)
        y_pred.extend(output.argmax(dim=1).cpu().numpy())
        y_true.extend(target.numpy())

# Macro-averaged metrics across the 10 digit classes.
precision, recall, f1, _ = precision_recall_fscore_support(y_true, y_pred, average='macro')
accuracy = accuracy_score(y_true, y_pred)

# Write the summary report. max(..., default=0) guards the degenerate case
# where every boosting round was skipped (epsilon >= 0.5 or == 0), which
# leaves the resource lists empty and would make a bare max() raise
# ValueError; sum() of an empty list is already 0.
with open(os.path.join(result_dir, 'results.txt'), 'w') as f:
    f.write(f'Accuracy: {accuracy:.4f}\n')
    f.write(f'Precision: {precision:.4f}\n')
    f.write(f'Recall: {recall:.4f}\n')
    f.write(f'F1 Score: {f1:.4f}\n')
    f.write(f'Total Training Time: {sum(adaboost.metrics["times"]):.2f}s\n')
    f.write(f'Max Memory Usage: {max(adaboost.metrics["memory"], default=0):.2f} MB\n')
    f.write(f'Max GPU Memory Usage: {max(adaboost.metrics["gpu_memory"], default=0):.2f} MB\n')

# Curves: accuracy, loss, and ensemble misclassification rate. Validation
# points are spread over the training-step axis so both series share one
# x-range.
plt.figure(figsize=(12, 6))

metric = adaboost.metrics
train_acc, val_acc = metric['train_acc'], metric['val_acc']
plt.subplot(2, 2, 1)
plt.plot(train_acc, label='Train')
plt.plot(np.linspace(0, len(train_acc), len(val_acc)), val_acc, label='Validation')
plt.title('Accuracy Curve')
plt.legend()

train_loss, val_loss = metric['train_loss'], metric['val_loss']
plt.subplot(2, 2, 2)
plt.plot(train_loss, label='Train')
plt.plot(np.linspace(0, len(train_loss), len(val_loss)), val_loss, label='Validation')
plt.title('Loss Curve')
plt.legend()

plt.subplot(2, 2, 3)
plt.plot(metric['misclassification'])
plt.title('Misclassification Rate')

plt.tight_layout()
plt.savefig(os.path.join(viz_dir, 'training_curves.png'))

# Save a grid of example predictions from the first test batch.
data, target = next(iter(test_loader))
data = data.to(device)
# Inference only: run under no_grad so predict() does not build an
# autograd graph (every other inference site in this file already does).
with torch.no_grad():
    output = adaboost.predict(data)
preds = output.argmax(dim=1).cpu()

plt.figure(figsize=(12, 6))
for i in range(10):
    plt.subplot(2, 5, i + 1)
    plt.imshow(data[i].cpu().squeeze(), cmap='gray')
    plt.title(f'Pred: {preds[i]}, True: {target[i]}')
    plt.axis('off')
plt.savefig(os.path.join(viz_dir, 'pred_examples.png'))