import os
import glob
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader, random_split, Subset
import torch.nn as nn
import torch.optim as optim
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy.signal import butter, sosfilt, resample
from scipy.ndimage import median_filter
import wfdb
import io
from PIL import Image
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
from sklearn.model_selection import train_test_split
import seaborn as sns
import wandb
import random
# Seed every RNG in use (Python, NumPy, PyTorch CPU and CUDA) so runs are reproducible
random.seed(42); np.random.seed(42); torch.manual_seed(42)
if torch.cuda.is_available(): torch.cuda.manual_seed_all(42)


# Naming Convention:
    # Signal = A full continuous PPG recording
    # Segment = A 20s window/segment extracted from the PPG recording/signal

# --- Filtering ---
def butter_bandpass(lowcut, highcut, fs=125, order=4):
    """Design a Butterworth band-pass filter; return second-order sections (SOS)."""
    nyquist = 0.5 * fs
    # butter() takes critical frequencies normalized to the Nyquist frequency
    band = [lowcut / nyquist, highcut / nyquist]
    return butter(order, band, analog=False, btype='band', output='sos')

def butter_bandpass_filter(signal, lowcut=0.5, highcut=5.0, fs=125, order=4):
    """Band-pass filter a 1-D signal using the SOS design from butter_bandpass."""
    return sosfilt(butter_bandpass(lowcut, highcut, fs, order), signal)


# --- Load and Process Full PPG Signal ---
def load_ppg_signal(file_path, signal_name='PPG', fs=125, target_fs=30, denoise=True):
    """Load one WFDB record and return its cleaned PPG channel.

    Parameters
    ----------
    file_path : str
        WFDB record path (without extension), as accepted by wfdb.rdsamp.
    signal_name : str
        Substring used to locate the PPG channel (case-insensitive).
    fs : int
        Sampling rate of the stored signal, in Hz.
    target_fs : int
        Desired output rate; the signal is resampled when it differs from fs.
    denoise : bool
        When True, apply a 0.5-5 Hz band-pass and a size-5 median filter.

    Returns
    -------
    (signal, fs) : (np.ndarray of float32, int) on success, or (None, None)
    when the record has no matching channel.  Returning a 2-tuple in both
    cases lets callers unpack unconditionally: `sig, fs = load_ppg_signal(...)`.
    """
    # Load signal using WFDB
    signals, fields = wfdb.rdsamp(file_path)

    # Find the PPG channel by case-insensitive substring match on channel names
    ppg_idx = next((i for i, name in enumerate(fields['sig_name']) if signal_name.upper() in name.upper()), None)
    if ppg_idx is None:
        print(f"[SKIPPED] No PPG found in {file_path}")
        # BUGFIX: previously returned a bare None, which made the caller's
        # `signal, fs = load_ppg_signal(...)` raise TypeError on unpacking.
        return None, None

    # Replace any NaN values with 0.0 so the signal never crashes during
    # filtering, segmentation, or normalization downstream
    signal = np.nan_to_num(signals[:, ppg_idx])

    # Denoise: band-pass then median filter to suppress spikes
    if denoise:
        signal = butter_bandpass_filter(signal, fs=fs)
        signal = median_filter(signal, size=5)

    # Downsample to the target frequency (default 30 Hz)
    if target_fs != fs:
        signal = resample(signal, int(len(signal) * target_fs / fs))
        fs = target_fs

    return signal.astype(np.float32), fs


# Split a full PPG recording into fixed-length 20s segments
def segment_signal(signal, fs=125, window_sec=20, threshold_std=0.015, file_path=""):
    """Split a full PPG recording into non-overlapping fixed-length windows.

    A window is dropped (with a log line) when it still contains NaNs or is
    essentially flat (std below threshold_std) — e.g. an all-NaN stretch that
    was zeroed out upstream.  Recordings shorter than one window yield [].
    """
    seg_len = window_sec * fs
    if len(signal) < seg_len:
        return []

    kept = []
    starts = range(0, len(signal) - seg_len + 1, seg_len)
    for idx, start in enumerate(starts):
        window = signal[start:start + seg_len]

        # Validity check: reject NaN-containing or near-constant windows
        if np.isnan(window).any() or np.std(window) < threshold_std:
            print(f"[REMOVED] Invalid/NaN/Flat segment in {file_path} | segment {idx} std={np.std(window):.4f}")
            continue

        kept.append(window)
    return kept


# --- Create Image from PPG Segment ---
def create_ppg_image(segment, size=224, include_alpha_channel=False, y_scale=0.4, y_offset=0.4, lead_linewidth=0.1, dpi=300):
    """Render one PPG segment as a size x size waveform image (numpy array).

    The segment is min-max normalized to [0, 1], scaled and offset into the
    axis y-range, drawn as a thin black line, rasterized through an in-memory
    PNG buffer, and resized to the requested pixel dimensions.
    """
    # Min-max normalize; an all-constant segment becomes all zeros
    normalized = segment - np.min(segment)
    peak = np.max(normalized)
    if peak != 0:
        normalized = normalized / peak
    else:
        normalized[:] = 0

    x = np.arange(len(normalized))
    fig = plt.figure(figsize=(size / dpi, size / dpi), dpi=dpi)
    fig.tight_layout(pad=0)
    ax = fig.add_subplot(111)
    ax.set_ylim([0, 1.5])
    ax.set_xlim([0, np.max(x)])
    ax.set_xticks([])
    ax.set_yticks([])
    ax.axis('off')

    # Position the trace vertically and clip it to the visible range
    trace = np.clip(normalized * y_scale + y_offset, 0, 1.5)
    ax.plot(x, trace, linewidth=lead_linewidth, color='k')

    # Rasterize via an in-memory PNG, then resize to the target pixel size
    buf = io.BytesIO()
    plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0)
    buf.seek(0)
    mode = "RGBA" if include_alpha_channel else "RGB"
    img = Image.open(buf).convert(mode)
    img = img.resize((size, size))
    plt.close(fig)

    return np.array(img)

def save_ppg_grid(img_batch, labels, path="preview_batch.png"):
    """Save a small grid of images to a file (no GUI)."""
    names = {0: "Non-AF", 1: "AF"}
    images = img_batch.detach().cpu().numpy().transpose(0, 2, 3, 1)  # NCHW -> NHWC
    count = len(images)
    cols = 3
    rows = -(-count // cols)  # ceiling division

    plt.figure(figsize=(cols * 3, rows * 3))
    for idx, image in enumerate(images):
        plt.subplot(rows, cols, idx + 1)
        plt.imshow(image)
        raw = labels[idx]
        # Accept both tensors (with .item()) and plain ints
        value = int(raw.item()) if hasattr(raw, "item") else int(raw)
        plt.title(names.get(value, str(value)))
        plt.axis('off')
    plt.tight_layout()
    plt.savefig(path, dpi=150, bbox_inches="tight")
    plt.close()

# --- PPG Image Dataset ---
# Dataset for individual 20s PPG windows/segments, not full recordings (each treated as one sample)
class PPGSegmentDataset(Dataset):
    """Dataset of individual 20s PPG windows (one sample per window).

    When output_image is True, each segment is rendered to an RGB image
    tensor in [0, 1] via create_ppg_image (falling back to a black image on
    rendering errors); otherwise the raw waveform is returned as a (1, T)
    float tensor.  Labels are coerced to binary {0.0, 1.0} tensors.
    """

    def __init__(self, segments, labels, output_image=True, size=224):
        self.segments = segments
        self.labels = labels
        self.output_image = output_image
        self.size = size

    def __len__(self):
        return len(self.segments)

    def __getitem__(self, idx):
        waveform = self.segments[idx]  # one 20s PPG segment

        # Coerce the label to a clean binary float; thresholding instead of
        # raising keeps the DataLoader alive on unexpected values.
        value = float(self.labels[idx])
        if value not in (0.0, 1.0):
            value = 1.0 if value >= 0.5 else 0.0
        target = torch.tensor([value], dtype=torch.float32)

        if not self.output_image:
            return torch.tensor(waveform, dtype=torch.float32).unsqueeze(0), target

        try:
            rendered = create_ppg_image(waveform, size=self.size)
        except Exception:
            # Fall back to an all-black image so the loader never breaks
            rendered = np.zeros((self.size, self.size, 3), dtype=np.uint8)

        pixels = torch.from_numpy(rendered.transpose(2, 0, 1)).float() / 255.0
        return pixels, target

# --- Get Files and Labels ---
def get_file_paths_and_labels(af_dir, non_af_dir):
    """Collect WFDB record base paths and binary labels (1 = AF, 0 = non-AF).

    Globs *.dat files in each directory and strips the extension to produce
    the record name (without suffix) that wfdb.rdsamp expects.

    Returns (file_list, labels), aligned lists with AF records first.
    """
    def _record_paths(directory):
        # BUGFIX: use splitext instead of str.replace(".dat", ""), which would
        # also mangle a ".dat" occurring earlier in the path (e.g. a directory
        # name), not just the file extension.
        return sorted(os.path.splitext(f)[0] for f in glob.glob(os.path.join(directory, "*.dat")))

    af_files = _record_paths(af_dir)
    non_af_files = _record_paths(non_af_dir)
    file_list = af_files + non_af_files
    labels = [1] * len(af_files) + [0] * len(non_af_files)
    return file_list, labels


# Prepare segments from all recordings/signals
def prepare_ppg_segments(file_list, labels, fs=125, window_sec=20, target_fs=30):
    """Load every recording, split it into windows, and pool the results.

    Parameters
    ----------
    file_list : list[str]
        WFDB record base paths.
    labels : list[int]
        Per-recording label, copied onto each of that recording's windows.
    fs : int
        Native sampling rate of the recordings (Hz).
    window_sec : int
        Window length in seconds.
    target_fs : int
        Sampling rate the windows are produced at (Hz).

    Returns (all_segments, all_labels): aligned lists with one entry per
    valid window.  Failures on individual recordings are logged and skipped.
    """
    all_segments = []
    all_labels = []

    for record, label in zip(file_list, labels):
        try:
            # BUGFIX: fs/window_sec/target_fs were previously hard-coded to
            # 125/20/30 here, silently ignoring this function's own parameters.
            loaded = load_ppg_signal(record, fs=fs, target_fs=target_fs)
            # Tolerate both a bare None and a (None, None) failure return
            if loaded is None:
                continue
            signal, seg_fs = loaded
            if signal is None:
                continue
            # Split the full recording into valid fixed-length windows
            for seg in segment_signal(signal, fs=seg_fs, window_sec=window_sec, file_path=record):
                all_segments.append(seg)
                all_labels.append(label)
        except Exception as e:
            print(f"[ERROR] {record}: {e}")
    return all_segments, all_labels


# --- CNN Model ---
class SimpleCNN(nn.Module):
    """Small CNN binary classifier over square RGB images, emitting one logit.

    Two conv+ReLU+max-pool stages (each halving the spatial size) feed a
    128-unit fully-connected head with dropout and a single-logit output
    (to be used with BCEWithLogitsLoss).
    """

    def __init__(self, dropout=0.3, input_size=224):
        """
        dropout: drop probability in the fully-connected head.
        input_size: side length of the (square) input images.  Generalizes the
            previously hard-coded 224 (which baked in a 32*56*56 flatten size);
            the default preserves the original behavior.
        """
        super().__init__()
        # After two 2x2 max-pools the spatial side is input_size // 4
        feat_side = input_size // 4
        self.net = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(16, 32, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(32 * feat_side * feat_side, 128), nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(128, 1)
        )

    def forward(self, x):
        """x: (B, 3, H, W) float tensor -> (B, 1) raw logits."""
        return self.net(x)


# --- Training Function ---
def train_model(config=None):
    """Run one W&B session: train for config.epochs, validate each epoch, then test.

    Depends on module-level names set in __main__:
      - GLOBAL_DEVICE: torch.device used for the model and batches
      - GLOBAL_LOADERS: dict with "train" / "val" / "test" DataLoaders
    Hyperparameters (lr, optimizer, dropout, epochs) are read from wandb.config,
    so this is intended to be invoked via wandb.agent.  Logs per-epoch metrics
    and final figures (confusion matrix, loss/accuracy curves) to the W&B run.

    NOTE(review): the sweep also declares a `batch_size` parameter, but the
    loaders are built once in __main__ with a fixed batch size — confirm
    whether per-run batch sizing was intended.
    """
    with wandb.init(config=config):
        config = wandb.config

        # Use the device chosen in __main__
        device = GLOBAL_DEVICE
        print(f"[train_model] Using device: {device}")

        train_loader = GLOBAL_LOADERS["train"]
        val_loader   = GLOBAL_LOADERS["val"]
        test_loader  = GLOBAL_LOADERS["test"]

        # --- Save & log one preview grid to W&B (no GUI) ---
        # Best-effort: failure to render the preview must not abort the run.
        try:
            imgs, lbls = next(iter(train_loader))
            save_ppg_grid(imgs, lbls, "preview_batch.png")
            wandb.log({"preview_batch": wandb.Image("preview_batch.png")})
        except Exception as e:
            print(f"[WARN] Could not create preview grid: {e}")

        # BCEWithLogitsLoss consumes raw logits; the model has no final sigmoid.
        model = SimpleCNN(dropout=config.dropout).to(device)
        criterion = nn.BCEWithLogitsLoss()
        optimizer = optim.Adam(model.parameters(), lr=config.lr) if config.optimizer == "adam" else optim.SGD(model.parameters(), lr=config.lr, momentum=0.9)

        train_losses, val_losses, val_accuracies = [], [], []

        for epoch in range(config.epochs):
            # Training
            model.train()
            running_loss = 0.0
            for imgs, lbls in train_loader:
                imgs = imgs.to(device, non_blocking=True)
                lbls = lbls.to(device, dtype=torch.float32,non_blocking=True)

                optimizer.zero_grad()
                logits = model(imgs)              # [B,1]
                loss = criterion(logits, lbls)    # lbls shape [B,1], float32
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
            # max(1, ...) guards against division by zero on an empty loader
            train_losses.append(running_loss / max(1, len(train_loader)))

            # Validation
            model.eval()
            all_preds, all_labels = [], []
            val_loss = 0.0
            with torch.no_grad():
                for imgs, lbls in val_loader:
                    imgs = imgs.to(device, non_blocking=True)
                    lbls = lbls.to(device, dtype=torch.float32, non_blocking=True)  # shape [B,1], values 0.0/1.0

                    logits = model(imgs)                      # [B,1]
                    loss = criterion(logits, lbls)
                    val_loss += loss.item()

                    probs = torch.sigmoid(logits).squeeze(1)  # [B]
                    preds = (probs > 0.5).to(torch.float32)   # [B]

                    # extend with plain 1D float arrays
                    all_preds.append(preds.cpu().numpy())     # list of (B,)
                    all_labels.append(lbls.squeeze(1).cpu().numpy())  # list of (B,)

            # --- Collate & sanitize for sklearn ---
            # 1) concatenate strictly as float64
            all_preds  = np.concatenate(all_preds, axis=0).astype(np.float64).reshape(-1)   # (N,)
            all_labels = np.concatenate(all_labels, axis=0).astype(np.float64).reshape(-1)  # (N,)

            # 2) keep only finite entries and labels in [0,1]
            mask = np.isfinite(all_preds) & np.isfinite(all_labels) & (all_labels >= 0.0) & (all_labels <= 1.0)
            all_preds, all_labels = all_preds[mask], all_labels[mask]

            # 3) binarize both sides safely
            all_preds  = (all_preds >= 0.5).astype(np.int32)
            all_labels = (all_labels >= 0.5).astype(np.int32)

            # 4) FINAL sanity check (crash early with helpful info)
            up = np.unique(all_preds)
            ul = np.unique(all_labels)
            if not np.all(np.isin(up, [0, 1])) or not np.all(np.isin(ul, [0, 1])):
                raise RuntimeError(f"Non-binary values detected AFTER cleaning. preds={up}, labels={ul}")

            # 5) metrics (zero_division=0 avoids warnings on degenerate splits)
            acc  = accuracy_score(all_labels, all_preds)
            prec = precision_score(all_labels, all_preds, average='binary', zero_division=0)
            rec  = recall_score(all_labels, all_preds, average='binary', zero_division=0)
            f1   = f1_score(all_labels, all_preds, average='binary', zero_division=0)

            val_losses.append(val_loss / max(1, len(val_loader)))
            val_accuracies.append(acc)

            wandb.log({
                "train_loss": train_losses[-1],
                "val_loss": val_losses[-1],
                "val_accuracy": acc,
                "val_precision": prec,
                "val_recall": rec,
                "val_f1": f1
            })

        # Final Test Evaluation
        # (model is still in eval mode from the last validation phase above)
        y_true, y_pred = [], []
        with torch.no_grad():
            for imgs, lbls in test_loader:
                imgs = imgs.to(device, non_blocking=True)
                lbls = lbls.to(device, dtype=torch.float32, non_blocking=True)

                logits = model(imgs)
                probs  = torch.sigmoid(logits)
                preds  = (probs > 0.5).float()

                # collect
                # NOTE: extend() over a [B,1] array appends B arrays of shape (1,);
                # np.concatenate below flattens them back into one vector.
                y_true.extend(lbls.cpu().numpy())
                y_pred.extend(preds.cpu().numpy())

        # concatenate
        y_true = np.concatenate(y_true, axis=0).reshape(-1)
        y_pred = np.concatenate(y_pred, axis=0).reshape(-1)

        # drop NaNs / infs
        mask = np.isfinite(y_true) & np.isfinite(y_pred)
        y_true, y_pred = y_true[mask], y_pred[mask]

        # FORCE BINARY {0,1}
        y_true = (y_true >= 0.5).astype(int)
        y_pred = (y_pred >= 0.5).astype(int)

        # Optional safety check
        ut = np.unique(y_true); up = np.unique(y_pred)
        if not (set(ut) <= {0,1} and set(up) <= {0,1}):
            raise RuntimeError(f"Non-binary values detected in test. preds={up}, labels={ut}")

        # Confusion matrix rendered to a PNG and logged to W&B
        cm = confusion_matrix(y_true, y_pred)
        plt.figure(figsize=(5, 4))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=['Non-AF', 'AF'], yticklabels=['Non-AF', 'AF'])
        plt.title("Confusion Matrix")
        plt.xlabel("Predicted")
        plt.ylabel("Actual")
        plt.savefig("confusion_matrix.png")
        plt.close()
        wandb.log({"confusion_matrix": wandb.Image("confusion_matrix.png")})

        # Plot Loss and Accuracy
        plt.figure()
        plt.plot(train_losses, label='Train Loss')
        plt.plot(val_losses, label='Validation Loss')
        plt.legend()
        plt.title('Loss Curve')
        plt.savefig("loss_curve.png")
        plt.close()
        wandb.log({"loss_curve": wandb.Image("loss_curve.png")})

        plt.figure()
        plt.plot(val_accuracies, label='Validation Accuracy')
        plt.legend()
        plt.title('Accuracy Curve')
        plt.savefig("accuracy_curve.png")
        plt.close()
        wandb.log({"accuracy_curve": wandb.Image("accuracy_curve.png")})


# --- Run Example ---
if __name__ == '__main__':
    wandb.login()

    # NOTE(review): hard-coded local dataset paths — parameterize before sharing.
    af_dir = "/Users/luciano/School/Internship/mimic_perform_af_wfdb"
    non_af_dir = "/Users/luciano/School/Internship/mimic_perform_non_af_wfdb"

    file_list, labels = get_file_paths_and_labels(af_dir, non_af_dir)

    print(f"Found {len(file_list)} recordings. Segmenting into 20s windows...")

    segments, seg_labels = prepare_ppg_segments(file_list, labels)

    print(f"Final dataset: {len(segments)} segments ({len(seg_labels)} labels)")

    # --- Choose device early so we can set pin_memory appropriately ---
    # GLOBAL_DEVICE and GLOBAL_LOADERS (below) are module-level names that
    # train_model reads directly.
    if torch.cuda.is_available():
        GLOBAL_DEVICE = torch.device("cuda")
    elif torch.backends.mps.is_available():
        GLOBAL_DEVICE = torch.device("mps")  # Apple Silicon
    else:
        GLOBAL_DEVICE = torch.device("cpu")

    USE_PIN = (GLOBAL_DEVICE.type == "cuda")  # only helps on CUDA; harmless elsewhere
    NUM_WORKERS = 0  # safe default; you can try 2–4 later if CPU preprocessing becomes heavier
    print(f"[__main__] Using device: {GLOBAL_DEVICE}, pin_memory={USE_PIN}, num_workers={NUM_WORKERS}")

    # Generate dataset of 20s PPG segments and Split the dataset into train, val, and test sets
    dataset = PPGSegmentDataset(segments, seg_labels, output_image=True)

    # --- Stratified split on labels (AF/non-AF kept balanced across splits) ---
    labels_np = np.array(seg_labels)

    # Train (80%) vs Temp (20%)
    train_idx, temp_idx = train_test_split(
        np.arange(len(labels_np)),
        test_size=0.2,
        stratify=labels_np,
        random_state=42,
    )

    # Split temp into val (10%) and test (10%)
    temp_labels = labels_np[temp_idx]
    val_idx, test_idx = train_test_split(
        temp_idx,
        test_size=0.5,              # half of 20% -> 10%
        stratify=temp_labels,
        random_state=42,
    )

    # Wrap in Subset datasets
    train_set = Subset(dataset, train_idx)
    val_set   = Subset(dataset, val_idx)
    test_set  = Subset(dataset, test_idx)

    # DataLoaders (shuffle only for train)
    # NOTE(review): batch_size is fixed at 8 here, but the sweep below also
    # declares a 'batch_size' parameter — the sweep value is never applied.
    train_loader = DataLoader(train_set, batch_size=8, shuffle=True,
                          num_workers=0, pin_memory=USE_PIN)
    val_loader   = DataLoader(val_set,   batch_size=8, shuffle=False,
                          num_workers=0, pin_memory=USE_PIN)
    test_loader  = DataLoader(test_set,  batch_size=8, shuffle=False,
                          num_workers=0, pin_memory=USE_PIN)

    GLOBAL_LOADERS = {
        "train": train_loader,
        "val": val_loader,
        "test": test_loader,
    }

    # W&B Sweep: grid search over hyperparameters; each combination triggers
    # one train_model() run via wandb.agent.
    sweep_config = {
        'method': 'grid',
        'parameters': {
            'lr': {'values': [0.001, 0.0005]},
            'batch_size': {'values': [8, 16]},
            'epochs': {'values': [5]},
            'optimizer': {'values': ['adam', 'sgd']},
            'dropout': {'values': [0.3, 0.5]}
        }
    }

    sweep_id = wandb.sweep(sweep=sweep_config, project="ppg-af-detection")
    wandb.agent(sweep_id, function=train_model)