import os
import json
import time
import random
from typing import Dict

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.optim import AdamW
from torch.optim.lr_scheduler import CosineAnnealingLR

from .dataset import VideoMAEClipDataset
from .model import VideoMAEClassifier


def set_seed(seed: int = 42):
    """Seed Python's and PyTorch's RNGs for reproducible runs.

    The previous version wrapped these calls in ``except Exception: pass``,
    which could silently skip seeding and break reproducibility without any
    warning. None of these calls raises in normal operation —
    ``torch.cuda.manual_seed_all`` is documented as a no-op when CUDA is
    unavailable — so no guard is needed.

    Args:
        seed: RNG seed applied to ``random``, CPU torch, and all CUDA devices.
    """
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


def accuracy(output: torch.Tensor, target: torch.Tensor) -> float:
    """Return top-1 accuracy of ``output`` logits against integer labels.

    Args:
        output: Logits of shape (batch, num_classes).
        target: Ground-truth class indices of shape (batch,).

    Returns:
        Fraction of correct predictions; 0.0 for an empty batch (the
        ``max(1, ...)`` guard avoids division by zero).
    """
    with torch.no_grad():
        n_correct = output.argmax(dim=1).eq(target).sum().item()
        n_total = target.size(0)
    return n_correct / max(1, n_total)


def collate_clip(batch):
    """Collate a list of (clip_tensor, label_tensor) pairs into a batch.

    Args:
        batch: Sequence of ``(clip, label)`` tuples of stackable tensors.

    Returns:
        Tuple ``(clips, labels)`` with a new leading batch dimension.
    """
    clips = torch.stack([item[0] for item in batch], dim=0)
    labels = torch.stack([item[1] for item in batch], dim=0)
    return clips, labels


 

def _normalize_clip(x: torch.Tensor) -> torch.Tensor:
    """Coerce a 5-D video batch toward the (B, T, 3, H, W) layout.

    Three repairs are applied in order, matching the inline logic that was
    previously copy-pasted into the train/val/test loops:
    - a channel-first batch (B, C, T, H, W) with C in {1, 3} is permuted so
      time precedes channels;
    - a single-channel clip is repeated to 3 channels;
    - more than 3 channels are truncated to the first 3.
    Non-5-D tensors pass through unchanged (only made contiguous).
    """
    if x.dim() == 5:
        if x.shape[2] not in (1, 3) and x.shape[1] in (1, 3):
            x = x.permute(0, 2, 1, 3, 4).contiguous()
        if x.shape[2] == 1:
            x = x.repeat(1, 1, 3, 1, 1)
        if x.shape[2] > 3:
            x = x[:, :, :3, ...]
    return x.contiguous()


def _evaluate(model, loader, criterion, device, tag: str, log_shape: bool):
    """Run one eval-mode pass over ``loader``.

    Args:
        model: Classifier accepting ``pixel_values`` and returning logits.
        loader: DataLoader yielding (clip, label) batches.
        criterion: Loss function over (logits, labels).
        device: Target device for batches.
        tag: Label used in the one-time shape debug print ('val'/'test').
        log_shape: Print the first batch's shape when True.

    Returns:
        Tuple ``(mean_loss, mean_acc)`` weighted by batch size.
    """
    model.eval()
    total_loss = 0.0
    total_acc = 0.0
    total_n = 0
    with torch.no_grad():
        for bi, (x, y) in enumerate(loader):
            x = _normalize_clip(x).to(device, non_blocking=True).float()
            y = y.to(device, non_blocking=True)
            if log_shape and bi == 0:
                print(f"{tag} x shape: {tuple(x.shape)}", flush=True)
            logits = model(pixel_values=x)
            loss = criterion(logits, y)
            bs = y.size(0)
            total_loss += loss.item() * bs
            total_acc += accuracy(logits, y) * bs
            total_n += bs
    return total_loss / max(1, total_n), total_acc / max(1, total_n)


def main(cfg_path=None):
    """Train and evaluate a VideoMAE classifier from a JSON config.

    Args:
        cfg_path: Optional path to a JSON config file; defaults to
            ``configs/videomae_config.json`` next to this module.

    Raises:
        FileNotFoundError: If the config file does not exist.

    Side effects:
        Writes ``label_map.json``, ``config_used.json``, ``best.pth`` and
        ``last.pth`` into the configured save directory, and prints per-epoch
        metrics.
    """
    if cfg_path is None:
        cfg_path = os.path.join(os.path.dirname(__file__), 'configs', 'videomae_config.json')
    if not os.path.isfile(cfg_path):
        raise FileNotFoundError(cfg_path)
    with open(cfg_path, 'r', encoding='utf-8') as f:
        cfg = json.load(f)

    set_seed(int(cfg.get('seed', 2024)))

    # ---- resolve dataset split locations ----
    dataset_name = cfg.get('dataset', 'deap_face')
    output_dir = cfg.get('output_dir', '.')
    experiment_mode = cfg.get('experiment_mode', 'subject-dependent')
    split_type = cfg.get('split_type', 'train-val-test')
    round_index = int(cfg.get('round_index', 1))

    save_base = os.path.join(output_dir, dataset_name)
    split_base = os.path.join(save_base, experiment_mode, split_type, f"round_{round_index}")
    train_csv = os.path.join(split_base, 'train.csv')
    val_csv = os.path.join(split_base, 'val.csv')
    test_csv = os.path.join(split_base, 'test.csv')

    # ---- hyperparameters ----
    num_frames = int(cfg.get('num_frames', 16))
    image_size = int(cfg.get('image_size', 224))
    batch_size = int(cfg.get('batch_size', 8))
    epochs = int(cfg.get('epochs', 50))
    lr = float(cfg.get('lr', 1e-4))
    weight_decay = float(cfg.get('weight_decay', 0.05))
    num_workers = int(cfg.get('num_workers', 4))
    pretrained_model = cfg.get('pretrained_model', 'MCG-NJU/videomae-base')
    pretrained = bool(cfg.get('pretrained', True))
    finetune_last_n_layers = int(cfg.get('finetune_last_n_layers', 4))
    label_remap = cfg.get('label_remap', None)
    drop_unmapped = bool(cfg.get('drop_unmapped', False))

    save_dir = cfg.get('save_dir', os.path.join(os.path.dirname(__file__), 'save'))
    os.makedirs(save_dir, exist_ok=True)

    # ---- datasets ----
    ds_train = VideoMAEClipDataset(
        csv_path=train_csv,
        root_dir=save_base,
        num_frames=num_frames,
        image_size=image_size,
        is_train=True,
        label2idx=None,
        label_remap=label_remap,
        drop_unmapped=drop_unmapped,
    )
    # Train split defines the label vocabulary; val/test reuse it so class
    # indices stay consistent across splits.
    label2idx: Dict[str, int] = ds_train.label2idx

    ds_val = VideoMAEClipDataset(
        csv_path=val_csv,
        root_dir=save_base,
        num_frames=num_frames,
        image_size=image_size,
        is_train=False,
        label2idx=label2idx,
        label_remap=label_remap,
        drop_unmapped=drop_unmapped,
    )
    ds_test = None
    if os.path.isfile(test_csv):  # test split is optional
        ds_test = VideoMAEClipDataset(
            csv_path=test_csv,
            root_dir=save_base,
            num_frames=num_frames,
            image_size=image_size,
            is_train=False,
            label2idx=label2idx,
            label_remap=label_remap,
            drop_unmapped=drop_unmapped,
        )

    dl_train = DataLoader(ds_train, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=True, collate_fn=collate_clip)
    dl_val = DataLoader(ds_val, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True, collate_fn=collate_clip)
    dl_test = None
    if ds_test is not None:
        dl_test = DataLoader(ds_test, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True, collate_fn=collate_clip)

    num_labels = len(label2idx)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model = VideoMAEClassifier(
        pretrained_model=pretrained_model,
        num_labels=num_labels,
        finetune_last_n_layers=finetune_last_n_layers,
        pretrained=pretrained,
    )
    model = model.to(device)

    # Only optimize trainable parameters (frozen backbone layers are excluded).
    optimizer = AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=lr, weight_decay=weight_decay)
    scheduler = CosineAnnealingLR(optimizer, T_max=max(1, epochs))
    criterion = nn.CrossEntropyLoss()

    best_val = 0.0
    best_path = os.path.join(save_dir, 'best.pth')
    last_path = os.path.join(save_dir, 'last.pth')

    # Persist the label map and effective config for later inference/audit.
    with open(os.path.join(save_dir, 'label_map.json'), 'w', encoding='utf-8') as f:
        json.dump(label2idx, f, ensure_ascii=False, indent=2)
    with open(os.path.join(save_dir, 'config_used.json'), 'w', encoding='utf-8') as f:
        json.dump(cfg, f, ensure_ascii=False, indent=2)

    for epoch in range(1, epochs + 1):
        model.train()
        t0 = time.time()
        total_loss = 0.0
        total_acc = 0.0
        total_n = 0
        for bi, (x, y) in enumerate(dl_train):
            x = _normalize_clip(x).to(device, non_blocking=True).float()
            y = y.to(device, non_blocking=True)
            if epoch == 1 and bi == 0:
                print(f"train x shape: {tuple(x.shape)}", flush=True)
            optimizer.zero_grad(set_to_none=True)
            logits = model(pixel_values=x)
            loss = criterion(logits, y)
            loss.backward()
            optimizer.step()

            bs = y.size(0)
            total_loss += loss.item() * bs
            total_acc += accuracy(logits.detach(), y) * bs
            total_n += bs

        scheduler.step()
        tr_loss = total_loss / max(1, total_n)
        tr_acc = total_acc / max(1, total_n)

        val_loss, val_acc = _evaluate(model, dl_val, criterion, device, 'val', log_shape=(epoch == 1))

        print(f"epoch {epoch:03d} | train loss={tr_loss:.4f} acc={tr_acc:.4f} | val loss={val_loss:.4f} acc={val_acc:.4f} | time={time.time()-t0:.1f}s")

        torch.save({'epoch': epoch, 'model': model.state_dict(), 'optimizer': optimizer.state_dict()}, last_path)
        if val_acc >= best_val:
            best_val = val_acc
            torch.save({'epoch': epoch, 'model': model.state_dict(), 'optimizer': optimizer.state_dict()}, best_path)

    if dl_test is not None:
        # Evaluate the best-val checkpoint on the held-out test split.
        ckpt = torch.load(best_path, map_location=device)
        model.load_state_dict(ckpt['model'])
        # BUGFIX: the test-loop shape print was previously gated on
        # `epoch == 1` (the stale training-loop variable), so it never fired
        # unless epochs == 1; it now prints on the first test batch.
        _, test_acc = _evaluate(model, dl_test, criterion, device, 'test', log_shape=True)
        print(f"test acc={test_acc:.4f}")


# Script entry point: trains with the default config path when run directly.
if __name__ == '__main__':
    main()
