import argparse
import os
from collections import defaultdict
from typing import Any, Dict, List

import fasternet
import lightning as L
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
import yaml
from dataset import CustomDataset
from PIL import Image
from torch.nn import functional as F
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.data import DataLoader, Subset
from torchvision import transforms
from torchvision.models import resnet50
from argparse import Namespace
import math
import warnings
from torch._utils import _accumulate


class AvgMeter:
    """Running-average tracker for a scalar metric (e.g. per-epoch loss)."""

    def __init__(self, name="", fmt=":f"):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Zero out all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count


class LinearWarmupCosineAnnealingLR(_LRScheduler):
    """Linear LR warmup for *warmup_epochs*, then cosine annealing to *eta_min*.

    Fixes over the previous version:
      * ZeroDivisionError when ``max_epochs == warmup_epochs`` (zero-length
        cosine cycle) — the cycle length is now clamped to at least 1.
      * Uses ``math.cos`` instead of ``np.cos`` so learning rates stay plain
        Python floats rather than numpy scalars leaking into optimizer state.
    """

    def __init__(
        self, optimizer, warmup_epochs, max_epochs, warmup_start_lr, eta_min=0
    ):
        self.warmup_epochs = warmup_epochs
        self.max_epochs = max_epochs
        self.warmup_start_lr = warmup_start_lr
        self.eta_min = eta_min
        # Clamp to >= 1: a zero-length cycle previously divided by zero in get_lr.
        self.cycle_epochs = max(max_epochs - warmup_epochs, 1)
        super().__init__(optimizer)

    def get_lr(self):
        # Warmup phase: linear ramp from warmup_start_lr up to each base lr.
        if self.last_epoch < self.warmup_epochs:
            alpha = self.last_epoch / self.warmup_epochs
            return [
                self.warmup_start_lr + alpha * (base_lr - self.warmup_start_lr)
                for base_lr in self.base_lrs
            ]
        # Annealing phase: cosine decay from base lr down to eta_min.
        progress = (self.last_epoch - self.warmup_epochs) / self.cycle_epochs
        return [
            self.eta_min
            + 0.5 * (base_lr - self.eta_min) * (1 + math.cos(math.pi * progress))
            for base_lr in self.base_lrs
        ]


def merge_args_cfg(args, cfg):
    """Overlay CLI *args* onto config *cfg* and return the merged Namespace.

    A CLI value wins unless it is None and the config already provides the
    key; keys only present on the CLI side are always carried over.
    """
    overrides = vars(args)
    base = vars(cfg)
    for key, value in overrides.items():
        if value is None and key in base:
            continue  # keep the config-file value
        base[key] = value
    return Namespace(**base)


def load_cfg(cfg):
    """Load a YAML config file into an argparse Namespace.

    Previously a missing/unreadable file (or a non-string *cfg*) left ``hyp``
    as None and crashed with ``TypeError: Namespace(**None)``; those cases —
    and an empty YAML document — now yield an empty Namespace instead.
    """
    hyp = None
    if isinstance(cfg, str):
        try:
            with open(cfg, errors="ignore") as f:
                hyp = yaml.safe_load(f)
        except (OSError, yaml.YAMLError) as e:
            # Best-effort load: report and fall through to an empty config.
            print(f"failed to load config {cfg!r}: {e}")
    return Namespace(**(hyp or {}))


def get_args():
    """Parse command-line options, then merge them with the YAML config.

    CLI flags that were explicitly set override config-file values (see
    merge_args_cfg); unset flags fall back to the YAML defaults.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--backbone", default="fasternet", choices=["fasternet", "resnet"]
    )
    parser.add_argument("--epochs", default=100, type=int)
    parser.add_argument("--ckpt-path")
    parser.add_argument(
        "--config", "--cfg", default="fasternet/cfg/my_fasternet_t0.yaml"
    )
    parser.add_argument("--pretrained-weights")
    parser.add_argument("--batch-size", default=128, type=int)
    parser.add_argument("--tuner", action="store_true")
    parser.add_argument("--workers", type=int, default=4)
    parser.add_argument("--lambda-age", type=float, default=0.001)
    parser.add_argument("--lambda-gender", type=float, default=2)
    parser.add_argument("--lr", default=0.001, type=float)
    cli_args = parser.parse_args()
    return merge_args_cfg(cli_args, load_cfg(cli_args.config))


def random_split(dataset, lengths, generator=torch.default_generator):
    """Randomly split *dataset* into non-overlapping Subsets.

    *lengths* is either a list of absolute subset sizes summing to
    ``len(dataset)``, or a list of fractions summing to ~1 (fractions are
    floored, and the remainder is distributed round-robin). Mirrors
    ``torch.utils.data.random_split`` but no longer relies on the private
    ``torch._utils._accumulate`` helper, which breaks on newer torch
    releases — the stdlib ``itertools.accumulate`` is used instead.
    """
    from itertools import accumulate  # stdlib replacement for torch._utils._accumulate

    total = sum(lengths)
    if math.isclose(total, 1) and total <= 1:
        subset_lengths = [int(math.floor(len(dataset) * frac)) for frac in lengths]
        remainder = len(dataset) - sum(subset_lengths)
        # Hand out leftover items one at a time so the sizes sum exactly.
        for i in range(remainder):
            subset_lengths[i % len(subset_lengths)] += 1
        lengths = subset_lengths
        for i, length in enumerate(lengths):
            if length == 0:
                warnings.warn(
                    f"Length of split at index {i} is 0. This might result in an empty dataset."
                )

    if sum(lengths) != len(dataset):
        raise ValueError(
            "Sum of input lengths does not equal the length of the input dataset!"
        )

    indices = torch.randperm(sum(lengths), generator=generator).tolist()
    return [
        Subset(dataset, indices[offset - length : offset])
        for offset, length in zip(accumulate(lengths), lengths)
    ]


def slice_tensor(tensor, lengths):
    """Carve *tensor* column-wise (dim 1) into named sub-tensors.

    *lengths* maps each chunk name to its width; chunks are taken in the
    dict's iteration order, starting at column 0.
    """
    result = {}
    offset = 0
    for key, width in lengths.items():
        end = offset + width
        result[key] = tensor[:, offset:end]
        offset = end
    return result


# Width (number of logits) each face-attribute head occupies in the model's
# flat output vector; consumed by slice_tensor to split predictions.
# NOTE(review): with "AGE" = 1 these widths sum to 26, which does not match
# the 272-output head built by get_resnet50 — confirm which table the
# trained head actually uses.
lengths = {
    "AGE": 1,
    "GENDER": 3,
    "GLASSES": 4,
    "RACE": 5,
    "EMOTION": 4,
    "MASK": 3,
    "HAT": 3,
    "WHISKERS": 3,
}

# Same attribute table but with a 102-way age head — presumably one class
# per year of age plus extremes; TODO confirm against the dataset labels.
lengths_org = {
    "AGE": 102,
    "GENDER": 3,
    "GLASSES": 4,
    "RACE": 5,
    "EMOTION": 4,
    "MASK": 3,
    "HAT": 3,
    "WHISKERS": 3,
}


def get_data(batch_size=128, num_workers=4):
    """Build train/val DataLoaders over CustomDataset.

    Generalized: *num_workers* was hard-coded to 4 even though the CLI
    exposes ``--workers``; it is now a parameter whose default preserves
    the previous behavior.
    """
    # Dev-box path vs. deployment path, selected via the face_dev env var.
    data_path = (
        "/root/code/cvmark/face/data/2792"
        if os.getenv("face_dev")
        else "/home/data/2792"
    )
    train_set = CustomDataset(data_path)
    val_set = CustomDataset(data_path, mode="val")
    print(f"train_set: {len(train_set)}, val_set: {len(val_set)}")
    train_loader = DataLoader(
        train_set,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True,
    )
    val_loader = DataLoader(
        val_set, batch_size=batch_size, num_workers=num_workers, pin_memory=True
    )
    return train_loader, val_loader


def get_resnet50(num_classes=272):
    """Randomly-initialized ResNet-50 with its final fc layer replaced by a
    fresh *num_classes*-way linear head."""
    backbone = resnet50(pretrained=False)
    backbone.fc = nn.Linear(backbone.fc.in_features, num_classes)
    return backbone


def get_loss(loss_weights):
    """Cross-entropy criterion with per-class weights *loss_weights*."""
    criterion = nn.CrossEntropyLoss(weight=loss_weights)
    return criterion


def get_optimizer(model, lr=0.001, weight_decay=1e-4):
    """AdamW optimizer over every parameter of *model*."""
    params = model.parameters()
    return optim.AdamW(params, lr=lr, weight_decay=weight_decay)


def get_scheduler(optimizer, max_epochs, warmup_epochs=5, warmup_start_lr=1e-6):
    """Warmup-then-cosine learning-rate schedule spanning *max_epochs*."""
    scheduler = LinearWarmupCosineAnnealingLR(
        optimizer,
        warmup_epochs,
        max_epochs,
        warmup_start_lr,
    )
    return scheduler


def train(args):
    """Train a ResNet-50 face-attribute model and save weights to resnet50.pth.

    Improvements: the model and batches are now moved to GPU when available
    (previously everything ran on CPU despite pin_memory loaders), and the
    per-epoch train/validate loops are factored into private helpers.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = get_resnet50().to(device)

    num_outputs = 272  # must match get_resnet50's default head size
    loss_weights = torch.ones(num_outputs, device=device)
    loss_weights[0] = args.lambda_age        # AGE output weight
    loss_weights[1:4] = args.lambda_gender   # GENDER output weights
    loss_fn = get_loss(loss_weights)

    optimizer = get_optimizer(model, lr=args.lr)
    scheduler = get_scheduler(optimizer, args.epochs)
    train_loader, val_loader = get_data(args.batch_size)

    for epoch in range(args.epochs):
        train_loss = _train_one_epoch(model, train_loader, loss_fn, optimizer, device)
        val_loss = _validate(model, val_loader, loss_fn, device)
        print(
            f"Epoch {epoch + 1}/{args.epochs}, "
            f"Train Loss: {train_loss:.4f}, "
            f"Validation Loss: {val_loss:.4f}"
        )
        scheduler.step()

    torch.save(model.state_dict(), "resnet50.pth")


def _train_one_epoch(model, loader, loss_fn, optimizer, device):
    """One optimization pass over *loader*; returns the average loss."""
    model.train()
    meter = AvgMeter("train_loss")
    for batch in loader:
        # NOTE(review): assumes batch["image"]/batch["label"] are tensors
        # collated by the DataLoader — confirm against CustomDataset.
        inputs = batch["image"].to(device, non_blocking=True)
        targets = batch["label"].to(device, non_blocking=True)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = loss_fn(outputs, targets)
        loss.backward()
        optimizer.step()
        meter.update(loss.item(), inputs.size(0))
    return meter.avg


def _validate(model, loader, loss_fn, device):
    """Average loss over *loader* with gradients disabled."""
    model.eval()
    meter = AvgMeter("val_loss")
    with torch.no_grad():
        for batch in loader:
            inputs = batch["image"].to(device, non_blocking=True)
            targets = batch["label"].to(device, non_blocking=True)
            outputs = model(inputs)
            loss = loss_fn(outputs, targets)
            meter.update(loss.item(), inputs.size(0))
    return meter.avg


if __name__ == "__main__":
    # Script entry point: parse CLI/YAML config, then run training.
    cli_args = get_args()
    train(cli_args)
