import os
import random
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, WeightedRandomSampler
from torch.cuda.amp import GradScaler, autocast
from torch.optim import AdamW
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts
from tqdm import tqdm

from data_loader import PPG2BPDataset
from ppg2bpnet import PPG2BPNetOptimized

# -------------------
# 1) Prepare datasets + sampler
# -------------------
train_ds = PPG2BPDataset(split_txt="data/data_split/split_train.txt", data_root="data/final2")
val_ds   = PPG2BPDataset(split_txt="data/data_split/split_val.txt",   data_root="data/final2")
test_ds  = PPG2BPDataset(split_txt="data/data_split/split_test.txt",  data_root="data/final2")

# Balance the sampler over SBP quartile bands: each training sample is drawn
# with probability inversely proportional to the size of its SBP band, so
# rare (very low / very high) pressures are seen as often as common ones.
sbp_vals = np.array([train_ds[idx]["sbp_true"].item() for idx in range(len(train_ds))])
band_edges = np.quantile(sbp_vals, [0.25, 0.5, 0.75])
bands = np.digitize(sbp_vals, band_edges)       # band index 0..3 per sample
band_counts = np.bincount(bands, minlength=4)   # samples per band
weights = 1.0 / band_counts[bands]              # inverse-frequency weight per sample
sampler = WeightedRandomSampler(weights, num_samples=len(weights), replacement=True)

train_loader = DataLoader(train_ds, batch_size=64, sampler=sampler,
                          num_workers=4, pin_memory=True)
val_loader = DataLoader(val_ds, batch_size=64, shuffle=False,
                        num_workers=2, pin_memory=True)
test_loader = DataLoader(test_ds, batch_size=64, shuffle=False,
                         num_workers=2, pin_memory=True)

# -------------------
# 2) Model, optimizer, scheduler, AMP scaler
# -------------------
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model = PPG2BPNetOptimized().to(device)

optimizer = AdamW(model.parameters(), lr=2e-4, weight_decay=1e-4)
# Cosine LR with warm restarts: the first cycle covers 4 epochs worth of
# optimizer steps; every subsequent cycle is twice as long as the previous.
steps_per_epoch = len(train_loader)
scheduler = CosineAnnealingWarmRestarts(
    optimizer,
    T_0=steps_per_epoch * 4,
    T_mult=2,
    eta_min=1e-6,
)
scaler = GradScaler()

# -------------------
# 3) Training / evaluation funcs
# -------------------
def train_one_epoch(epoch, beta=4.0):
    """Train the global ``model`` for one epoch over ``train_loader``.

    Uses mixed precision with smooth-L1 (Huber) losses (transition point
    *beta*), weighting SBP 0.7 and DBP 0.3.

    Args:
        epoch: 1-based epoch index, used only for the progress-bar label.
        beta: smooth-L1 beta passed to both the SBP and DBP losses.

    Returns:
        Tuple of per-sample averages ``(sbp_loss, dbp_loss)`` for the epoch.
    """
    model.train()
    total_sbp_loss = 0.0
    total_dbp_loss = 0.0
    total_samples = 0
    pbar = tqdm(train_loader, desc=f"Epoch {epoch} [Train]")
    for batch in pbar:
        optimizer.zero_grad(set_to_none=True)
        ppg_t = batch["ppg_target"].to(device)
        ppg_c = batch["ppg_calib"].to(device)
        sbp_c = batch["sbp_calib"].to(device)
        dbp_c = batch["dbp_calib"].to(device)
        sbp_t = batch["sbp_true"].to(device)
        dbp_t = batch["dbp_true"].to(device)

        with autocast():
            pred = model(ppg_t, ppg_c, sbp_c, dbp_c)
            sbp_pred, dbp_pred = pred[:, 0], pred[:, 1]
            loss_sbp = F.smooth_l1_loss(sbp_pred, sbp_t, beta=beta)
            loss_dbp = F.smooth_l1_loss(dbp_pred, dbp_t, beta=beta)
            loss = 0.7 * loss_sbp + 0.3 * loss_dbp

        scaler.scale(loss).backward()
        # BUGFIX: gradients must be unscaled before clipping; clipping the
        # loss-scale-inflated gradients makes the 2.0 threshold meaningless.
        scaler.unscale_(optimizer)
        torch.nn.utils.clip_grad_norm_(model.parameters(), 2.0)
        scaler.step(optimizer)
        scaler.update()
        # BUGFIX: T_0 was configured in optimizer *steps* (len(train_loader)*4)
        # but the old call passed fractional *epochs*, pushing the first warm
        # restart thousands of epochs out. Advance one step per batch instead.
        scheduler.step()

        batch_size = ppg_t.size(0)
        total_sbp_loss += loss_sbp.item() * batch_size
        total_dbp_loss += loss_dbp.item() * batch_size
        total_samples += batch_size

        pbar.set_postfix({
            "SBP-Loss": f"{(total_sbp_loss/total_samples):.3f}",
            "DBP-Loss": f"{(total_dbp_loss/total_samples):.3f}"
        })
    return total_sbp_loss/total_samples, total_dbp_loss/total_samples

@torch.no_grad()
def evaluate(loader):
    """Evaluate the global ``model`` on *loader*.

    Returns:
        Tuple ``(sbp_mae, dbp_mae, hit5)`` — per-sample mean absolute errors
        for SBP and DBP, and the fraction of SBP predictions within ±5 mmHg.
    """
    model.eval()
    sum_sbp_err = 0.0
    sum_dbp_err = 0.0
    n_within_5 = 0
    n_seen = 0
    for batch in tqdm(loader, desc="   [Evaluate]"):
        ppg_t = batch["ppg_target"].to(device)
        ppg_c = batch["ppg_calib"].to(device)
        sbp_c = batch["sbp_calib"].to(device)
        dbp_c = batch["dbp_calib"].to(device)
        sbp_t = batch["sbp_true"].to(device)
        dbp_t = batch["dbp_true"].to(device)

        out = model(ppg_t, ppg_c, sbp_c, dbp_c)
        sbp_err = (out[:, 0] - sbp_t).abs()
        dbp_err = (out[:, 1] - dbp_t).abs()

        sum_sbp_err += sbp_err.sum().item()
        sum_dbp_err += dbp_err.sum().item()
        n_within_5 += (sbp_err <= 5.0).sum().item()
        n_seen += sbp_t.size(0)

    return sum_sbp_err / n_seen, sum_dbp_err / n_seen, n_within_5 / n_seen

# -------------------
# 4) Training loop with clinical early stopping
# -------------------
best_hit5 = 0.0   # best fraction of val SBP predictions within ±5 mmHg so far
stall = 0         # epochs since the last improvement
patience = 20
max_epochs = 100

for epoch in range(1, max_epochs+1):
    sbp_tr, dbp_tr = train_one_epoch(epoch)
    sbp_va, dbp_va, hit5_va = evaluate(val_loader)

    # BUGFIX (label): train_one_epoch returns smooth-L1 (beta=4) losses,
    # not MAE — only the validation numbers from evaluate() are true MAE.
    print(f"Epoch {epoch:02d} | "
          f"Train loss SBP {sbp_tr:.3f} DBP {dbp_tr:.3f} | "
          f"Val   MAE SBP {sbp_va:.3f} DBP {dbp_va:.3f} | "
          f"%≤±5mmHg {hit5_va*100:.1f}%"
    )

    # Early-stop on the clinical metric (±5 mmHg hit rate), not on loss;
    # require a >0.001 absolute improvement to reset patience.
    if hit5_va > best_hit5 + 1e-3:
        best_hit5 = hit5_va
        stall = 0
        torch.save(model.state_dict(), "best_ppg2bp_net.pth")
        print(" ↳ New best clinical hit-rate, model saved.")
    else:
        stall += 1
        if stall >= patience:
            print("Stopping early (no clinical improvement for"
                  f" {patience} epochs).")
            break

# -------------------
# 5) Final test
# -------------------
# BUGFIX: map the checkpoint onto the current device — without map_location,
# a checkpoint saved from a GPU run cannot be reloaded in a CPU-only process.
model.load_state_dict(torch.load("best_ppg2bp_net.pth", map_location=device))
sbp_te, dbp_te, hit5_te = evaluate(test_loader)
print(f"\nTEST | MAE SBP {sbp_te:.3f} DBP {dbp_te:.3f} | "
      f"%≤±5mmHg {hit5_te*100:.1f}%")
