import os
from PIL import Image
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, models
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from torchsummary import summary
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay

class FaceDataset(Dataset):
    """Map-style dataset yielding (image, label) pairs for face images on disk.

    Images are opened lazily in ``__getitem__`` and converted to RGB;
    an optional transform is applied after loading.
    """

    def __init__(self, image_paths, labels, transform=None):
        self.image_paths = image_paths
        self.labels = labels
        self.transform = transform

    def __len__(self):
        # One sample per stored image path.
        return len(self.image_paths)

    def __getitem__(self, idx):
        # Load lazily so memory use stays bounded regardless of dataset size.
        sample = Image.open(self.image_paths[idx]).convert("RGB")
        if self.transform is not None:
            sample = self.transform(sample)
        return sample, self.labels[idx]

class MobileNetV3Classifier(nn.Module):
    """MobileNetV3-Small backbone with its final layer replaced for
    n-way classification.

    Args:
        pretrained: load ImageNet weights for the backbone.
        num_classes: size of the output layer. Defaults to 2 (real vs.
            fake) for backward compatibility with existing callers.
    """

    def __init__(self, pretrained=True, num_classes=2):
        super().__init__()
        # NOTE(review): the `pretrained=` flag is deprecated in
        # torchvision >= 0.13 in favor of `weights=`; kept for
        # compatibility with the installed version — confirm before upgrading.
        self.base_model = models.mobilenet_v3_small(pretrained=pretrained)
        # Swap only the last classifier layer so the pretrained features
        # are preserved and the head emits `num_classes` logits.
        in_features = self.base_model.classifier[3].in_features
        self.base_model.classifier[3] = nn.Linear(in_features, num_classes)

    def forward(self, x):
        """Return raw (unsoftmaxed) logits of shape (batch, num_classes)."""
        return self.base_model(x)


def train(model, dataloader, optimizer, criterion, device):
    """Run one optimization epoch and return (mean_loss, accuracy)."""
    model.train()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0
    progress = tqdm(dataloader, desc="Training", leave=False)
    for batch_images, batch_labels in progress:
        batch_images = batch_images.to(device)
        batch_labels = batch_labels.to(device)

        optimizer.zero_grad()
        logits = model(batch_images)
        batch_loss = criterion(logits, batch_labels)
        batch_loss.backward()
        optimizer.step()

        # Weight the (mean) batch loss by batch size so the epoch loss
        # is a true per-sample average even with a ragged final batch.
        loss_sum += batch_loss.item() * batch_images.size(0)
        n_correct += (logits.argmax(dim=1) == batch_labels).sum().item()
        n_seen += batch_labels.size(0)
        progress.set_postfix(loss=batch_loss.item())

    return loss_sum / n_seen, n_correct / n_seen


def evaluate(model, dataloader, criterion, device):
    """Evaluate `model` on `dataloader` without gradients; return (mean_loss, accuracy)."""
    model.eval()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0
    progress = tqdm(dataloader, desc="Evaluating", leave=False)
    with torch.no_grad():
        for batch_images, batch_labels in progress:
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)

            logits = model(batch_images)
            batch_loss = criterion(logits, batch_labels)

            # Weight by batch size so the epoch loss is a per-sample mean.
            loss_sum += batch_loss.item() * batch_images.size(0)
            n_correct += (logits.argmax(dim=1) == batch_labels).sum().item()
            n_seen += batch_labels.size(0)
            progress.set_postfix(loss=batch_loss.item())

    return loss_sum / n_seen, n_correct / n_seen


def load_image_paths(data_dir):
    """Collect image file paths and binary labels from `data_dir`.

    Expects two subdirectories: ``real`` (label 1) and ``fake`` (label 0),
    with real images listed first, as before.

    Fixes over the original:
      * listings are sorted — ``os.listdir`` returns entries in arbitrary
        order, which made dataset order nondeterministic across runs;
      * hidden files (e.g. ``.DS_Store``) and non-file entries are skipped
        so they cannot crash ``Image.open`` during training.

    Returns:
        (image_paths, labels): parallel lists.
    """
    image_paths = []
    labels = []
    # (directory, label) pairs: real=1, fake=0.
    for subdir, label in ((os.path.join(data_dir, "real"), 1),
                          (os.path.join(data_dir, "fake"), 0)):
        for name in sorted(os.listdir(subdir)):
            path = os.path.join(subdir, name)
            if name.startswith(".") or not os.path.isfile(path):
                continue
            image_paths.append(path)
            labels.append(label)

    return image_paths, labels


def plot_confusion_matrix(model, dataloader, device, class_names=["Fake", "Real"]):
    """Run `model` over `dataloader` and display the resulting confusion matrix."""
    model.eval()
    predictions = []
    targets = []

    with torch.no_grad():
        for batch_images, batch_labels in dataloader:
            logits = model(batch_images.to(device))
            # Move predictions back to CPU for sklearn; labels never left it.
            predictions.extend(logits.argmax(dim=1).cpu().numpy())
            targets.extend(batch_labels.numpy())

    matrix = confusion_matrix(targets, predictions)
    display = ConfusionMatrixDisplay(confusion_matrix=matrix, display_labels=class_names)
    display.plot(cmap=plt.cm.Blues)
    plt.title("Confusion Matrix on Test Set")
    plt.show()

def plot_metrics(train_accs, val_accs, train_losses, val_losses, save_path="metrics.png"):
    """Plot per-epoch accuracy and loss curves side by side.

    Saves the figure to `save_path`, then shows it interactively.

    Args:
        train_accs / val_accs: per-epoch accuracies in [0, 1].
        train_losses / val_losses: per-epoch mean losses.
        save_path: output image path.
    """
    # Fix: removed the redundant function-local
    # `import matplotlib.pyplot as plt` — pyplot is already imported at
    # module level.
    epochs = range(1, len(train_accs) + 1)
    fig, axs = plt.subplots(1, 2, figsize=(12, 5))

    # Accuracy panel
    axs[0].plot(epochs, train_accs, 'bo-', label='Train Accuracy')
    axs[0].plot(epochs, val_accs, 'ro-', label='Validation Accuracy')
    axs[0].set_title('Accuracy per Epoch')
    axs[0].set_xlabel('Epoch')
    axs[0].set_ylabel('Accuracy')
    axs[0].set_ylim(0, 1)  # accuracy is a proportion; fix axis for comparability
    axs[0].grid(True)
    axs[0].legend()

    # Loss panel
    axs[1].plot(epochs, train_losses, 'bo-', label='Train Loss')
    axs[1].plot(epochs, val_losses, 'ro-', label='Validation Loss')
    axs[1].set_title('Loss per Epoch')
    axs[1].set_xlabel('Epoch')
    axs[1].set_ylabel('Loss')
    axs[1].grid(True)
    axs[1].legend()

    plt.tight_layout()
    plt.savefig(save_path)
    plt.show()
    print(f" Metrics plot saved to {save_path}")


if __name__ == "__main__":
    # Dataset layout: <dir>/real/* (label 1) and <dir>/fake/* (label 0).
    train_dir = "real_vs_fake/real-vs-fake/train"
    val_dir = "real_vs_fake/real-vs-fake/test"

    # Per-epoch history consumed by plot_metrics at the end.
    train_accs = []
    val_accs = []
    train_losses = []
    val_losses = []

    train_paths, train_labels = load_image_paths(train_dir)
    val_paths, val_labels = load_image_paths(val_dir)

    # Standard ImageNet preprocessing — matches the pretrained backbone's
    # expected input statistics.
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225])
    ])

    train_dataset = FaceDataset(train_paths, train_labels, transform)
    val_dataset = FaceDataset(val_paths, val_labels, transform)

    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")
    model = MobileNetV3Classifier().to(device)

    # Model architecture visualization
    summary(model, input_size=(3, 224, 224))

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    best_acc = 0.0
    os.makedirs("checkpoints", exist_ok=True)

    num_epochs = 10
    for epoch in range(num_epochs):
        print(f"\nEpoch {epoch+1}/{num_epochs}")
        train_loss, train_acc = train(model, train_loader, optimizer, criterion, device)
        val_loss, val_acc = evaluate(model, val_loader, criterion, device)

        train_accs.append(train_acc)
        val_accs.append(val_acc)
        train_losses.append(train_loss)
        val_losses.append(val_loss)

        # Fix: `best_acc` was initialized but never updated or used — now
        # it tracks the best validation accuracy and checkpoints that model.
        if val_acc > best_acc:
            best_acc = val_acc
            torch.save(model.state_dict(), "checkpoints/best_mobilenetv3.pth")
            print(f"New best model saved (val acc {best_acc:.4f}).")

        print(f"Train Loss: {train_loss:.4f}, Acc: {train_acc:.4f}")
        print(f"Val   Loss: {val_loss:.4f}, Acc: {val_acc:.4f}")
        print(f"\n Average Train Accuracy: {sum(train_accs) / len(train_accs):.4f}")
        print(f" Average Test Accuracy: {sum(val_accs) / len(val_accs):.4f}")

    # Model saving (weights of the final epoch, kept from the original flow)
    torch.save(model.state_dict(), "checkpoints/final_mobilenetv3_2.pth")
    print("Final model saved.")
    plot_metrics(train_accs, val_accs, train_losses, val_losses, save_path="metrics.png")
    plot_confusion_matrix(model, val_loader, device)