import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, models
from PIL import Image
import os, tqdm, logging

class LOODataset(Dataset):
    """Dataset for one leave-one-out (LOO) split, driven by a CSV manifest.

    The CSV is read with pandas; column 0 holds the image file path and
    column 2 holds the class label (column 1 is unused here — TODO confirm
    its meaning against whatever writes these CSVs).

    Args:
        csv_file: Path to the CSV manifest for this split.
        transform: Optional callable applied to the loaded PIL image.
    """

    def __init__(self, csv_file, transform=None):
        self.df = pd.read_csv(csv_file)  # one row per sample
        self.transform = transform

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        img_name = self.df.iloc[idx, 0]  # column 0: image file path
        label = self.df.iloc[idx, 2]     # column 2: class label
        # Force 3-channel RGB: a grayscale or RGBA file would otherwise
        # break the 3-channel Normalize in the transform pipeline.
        image = Image.open(img_name).convert("RGB")
        if self.transform:
            image = self.transform(image)
        # Cast to a plain int so the default DataLoader collate always
        # produces an integer label tensor.
        return image, int(label)

# ---- Experiment configuration -------------------------------------------
experiment_dir = "experiment_0/"
job_id = "LOO_01"      # leave-one-out fold identifier; prefixes all artifacts
device_id = "cuda:1"   # requested GPU; code falls back to CPU if unavailable
log_dir = os.path.join(experiment_dir, "logs")
checkpoint_dir = os.path.join(experiment_dir, "check_points")
train_csv = f"{job_id}_train.csv"
test_csv = f"{job_id}_test.csv"
train_log = f"{job_id}_training.log"

# Create the output directories up front: logging.basicConfig(filename=...)
# and torch.save both raise FileNotFoundError if the directory is missing.
os.makedirs(log_dir, exist_ok=True)
os.makedirs(checkpoint_dir, exist_ok=True)


# Preprocessing shared by both splits: resize to the ViT-B/16 input size,
# convert to a tensor, then scale every channel to roughly [-1, 1].
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])

# Training split: shuffled mini-batches.
train_dataset = LOODataset(
    csv_file=os.path.join(experiment_dir, train_csv),
    transform=transform,
)
train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True)

# Held-out split: fixed order so evaluation is reproducible.
val_dataset = LOODataset(
    csv_file=os.path.join(experiment_dir, test_csv),
    transform=transform,
)
val_loader = DataLoader(val_dataset, batch_size=4, shuffle=False)

# Use the requested CUDA device when present; otherwise run on CPU.
device = torch.device(device_id if torch.cuda.is_available() else "cpu")

# ImageNet-pretrained ViT-B/16, with the classification head replaced by a
# freshly initialised 2-class linear layer.
model = models.vit_b_16(weights="IMAGENET1K_V1")
in_features = model.heads.head.in_features
model.heads.head = nn.Linear(in_features, 2)
model = model.to(device)


# Set up file logging before the training loop.  The original console
# message said "training completed" here, which contradicted both reality
# and the log line below; it now announces the start.
print("#### training started ...")
logging.basicConfig(filename=os.path.join(log_dir, train_log), level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s')
logging.info("### training started ...")

# Standard multi-class objective + AdamW with mild weight decay.
criterion = nn.CrossEntropyLoss()
optimizer = optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-4)

num_epochs = 10
best_val_loss = float('inf')  # lowest validation loss seen so far
best_model_path = os.path.join(checkpoint_dir, f"{job_id}_best_model.pth")

for epoch in range(num_epochs):
    # ---- Training pass ---------------------------------------------------
    model.train()
    running_loss = 0.0

    with tqdm.tqdm(train_loader, desc=f"Epoch {epoch+1}/{num_epochs}", unit="batch") as pbar:
        # Count batches with enumerate: tqdm only syncs pbar.n to its display
        # interval, so running_loss / (pbar.n + 1) could show a stale average.
        for batch_idx, (images, labels) in enumerate(pbar, start=1):
            images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            pbar.set_postfix(loss=running_loss / batch_idx)

    avg_train_loss = running_loss / len(train_loader)
    print(f"Epoch {epoch+1}/{num_epochs}, Loss: {avg_train_loss}")
    logging.info(f"Epoch {epoch+1}/{num_epochs}, Loss: {avg_train_loss}")

    # ---- Validation pass -------------------------------------------------
    model.eval()
    val_loss = 0.0
    correct_preds = 0
    total_preds = 0
    with torch.no_grad():
        for images, labels in val_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            loss = criterion(outputs, labels)
            val_loss += loss.item()

            preds = torch.argmax(outputs, dim=1)
            correct_preds += (preds == labels).sum().item()
            total_preds += labels.size(0)

    avg_val_loss = val_loss / len(val_loader)
    val_acc = correct_preds / total_preds
    print(f"Epoch {epoch+1}/{num_epochs}, Validation Loss: {avg_val_loss}, Accuracy: {val_acc:.4f}")
    logging.info(f"Epoch {epoch+1}/{num_epochs}, Validation Loss: {avg_val_loss}, Accuracy: {val_acc:.4f}")

    # Per-epoch checkpoint.  NOTE(review): the filename is zero-based
    # (epoch0..epoch9) while logs report 1-based epochs; kept as-is in case
    # downstream tooling relies on these names — confirm before renaming.
    checkpoint_path = os.path.join(checkpoint_dir, f"{job_id}_epoch{epoch}.pth")
    torch.save(model.state_dict(), checkpoint_path)

    # Track the best model by validation loss.  best_model_path is fixed
    # before the loop; no need to rebuild it every epoch.
    if avg_val_loss < best_val_loss:
        best_val_loss = avg_val_loss
        torch.save(model.state_dict(), best_model_path)
        logging.info(f"Best model saved at {best_model_path}")

print("#### training completed ...")
logging.info("#### training completed ...")