import argparse
import math
import os
import warnings
from argparse import Namespace
from collections import defaultdict
from typing import Any, Dict, List

import fasternet
import lightning as L
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
import yaml
from dataset import CustomDataset
from PIL import Image
from torch import default_generator, randperm
from torch._utils import _accumulate
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Subset
from torchvision import transforms
from torchvision.models import resnet50
from torch.nn import functional as F

class AvgMeter:
    """Tracks the running mean of a scalar metric across updates."""

    def __init__(self, name="", fmt=":f"):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Zero out the last value, running sum, count, and average."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Fold in `val`, observed `n` times, and recompute the mean."""
        self.val = val
        self.sum += n * val
        self.count += n
        self.avg = self.sum / self.count


class LinearWarmupCosineAnnealingLR(_LRScheduler):
    """Linear LR warmup followed by cosine annealing.

    For the first `warmup_epochs` epochs the learning rate rises linearly
    from `warmup_start_lr` to each param group's base LR; for the remaining
    `max_epochs - warmup_epochs` epochs it decays along a half cosine down
    to `eta_min`.
    """

    def __init__(
        self, optimizer, warmup_epochs, max_epochs, warmup_start_lr, eta_min=0
    ):
        self.warmup_epochs = warmup_epochs
        self.max_epochs = max_epochs
        self.warmup_start_lr = warmup_start_lr
        self.eta_min = eta_min
        # Length of the cosine phase.  Clamp to >= 1 so get_lr() cannot divide
        # by zero when max_epochs == warmup_epochs (the original crashed).
        self.cycle_epochs = max(max_epochs - warmup_epochs, 1)
        super().__init__(optimizer)

    def get_lr(self):
        """Return the LR for every param group at `self.last_epoch`."""
        if self.last_epoch < self.warmup_epochs:
            # Linear warmup phase.
            alpha = self.last_epoch / self.warmup_epochs
            return [
                self.warmup_start_lr + alpha * (base_lr - self.warmup_start_lr)
                for base_lr in self.base_lrs
            ]
        # Cosine annealing phase.  math.cos returns a plain float; the
        # original np.cos produced numpy scalars as learning rates.
        progress = (self.last_epoch - self.warmup_epochs) / self.cycle_epochs
        return [
            self.eta_min
            + 0.5 * (base_lr - self.eta_min) * (1 + math.cos(math.pi * progress))
            for base_lr in self.base_lrs
        ]


def merge_args_cfg(args, cfg):
    """Overlay `args` onto `cfg` and return the merged Namespace.

    An arg value wins whenever it is non-None or its key is absent from the
    config — i.e. explicit CLI values take priority, config fills the gaps.
    (Like the original, this mutates cfg's underlying __dict__ in place.)
    """
    overrides = vars(args)
    merged = vars(cfg)
    for key, value in overrides.items():
        keep_cfg_value = value is None and key in merged
        if not keep_cfg_value:
            merged[key] = value
    return Namespace(**merged)


def load_cfg(cfg):
    """Load a YAML config file and return its mapping as a Namespace.

    Args:
        cfg: path to a YAML file of hyperparameters.

    Raises:
        TypeError: if `cfg` is not a path string.
        FileNotFoundError: if the file does not exist.  (The original printed
        "no that file" and then crashed with `Namespace(**None)`; fail loudly
        with the real error instead.)
    """
    if not isinstance(cfg, str):
        raise TypeError(f"expected a config path string, got {type(cfg).__name__}")
    with open(cfg, errors="ignore") as f:
        hyp = yaml.safe_load(f)  # load hyps dict
    # An empty YAML file parses to None; normalize to an empty namespace.
    return Namespace(**(hyp or {}))


def get_args():
    """Parse CLI flags, then merge them on top of the YAML config file."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--backbone", default="fasternet", choices=["fasternet", "resnet"]
    )
    parser.add_argument("--epochs", default=100, type=int)
    parser.add_argument("--ckpt-path")
    parser.add_argument("--config", "--cfg", default="fasternet/cfg/my_fasternet_t0.yaml")
    parser.add_argument("--pretrained-weights")
    parser.add_argument("--batch-size", default=128, type=int)
    parser.add_argument("--tuner", action="store_true")
    parser.add_argument("--workers", type=int, default=4)
    parser.add_argument("--lambda-age", type=float, default=0.001)
    parser.add_argument("--lambda-gender", type=float, default=2)
    parser.add_argument("--lr", default=0.001, type=float)

    cli = parser.parse_args()
    # Explicitly-given CLI values take precedence over the YAML entries.
    cfg = load_cfg(cli.config)
    return merge_args_cfg(cli, cfg)


def random_split(dataset, lengths, generator=default_generator):
    r"""Randomly split a dataset into non-overlapping Subsets.

    `lengths` may be absolute counts, or fractions summing to 1 — in the
    fractional case each split gets floor(frac * len(dataset)) items and any
    remainder is handed out one item at a time in round-robin order.

    Pass a seeded generator for reproducible splits, e.g.::

        random_split(range(10), [3, 7], generator=torch.Generator().manual_seed(42))

    Args:
        dataset (Dataset): dataset to split.
        lengths (sequence): counts or fractions of the splits to produce.
        generator (Generator): source of randomness for the permutation.
    """
    total = len(dataset)  # type: ignore[arg-type]
    if math.isclose(sum(lengths), 1) and sum(lengths) <= 1:
        # Fractional mode: floor each share, then distribute the leftovers.
        counts: List[int] = []
        for idx, frac in enumerate(lengths):
            if frac < 0 or frac > 1:
                raise ValueError(f"Fraction at index {idx} is not between 0 and 1")
            counts.append(int(math.floor(total * frac)))
        leftover = total - sum(counts)
        for extra in range(leftover):
            counts[extra % len(counts)] += 1
        lengths = counts
        for idx, count in enumerate(lengths):
            if count == 0:
                warnings.warn(
                    f"Length of split at index {idx} is 0. "
                    f"This might result in an empty dataset."
                )

    # Cannot verify that dataset is Sized
    if sum(lengths) != total:
        raise ValueError(
            "Sum of input lengths does not equal the length of the input dataset!"
        )

    perm = randperm(sum(lengths), generator=generator).tolist()  # type: ignore[call-overload]
    splits = []
    start = 0
    for count in lengths:
        splits.append(Subset(dataset, perm[start : start + count]))
        start += count
    return splits


def slice_tensor(tensor, lengths):
    """Split `tensor` column-wise into named chunks.

    `lengths` maps each name to a column count; chunks are carved out left to
    right, following the dict's insertion order.
    """
    chunks = {}
    stop = 0
    for key, width in lengths.items():
        start, stop = stop, stop + width
        chunks[key] = tensor[:, start:stop]
    return chunks


# Define the lengths for each part
# Column widths of the concatenated prediction tensor, consumed left-to-right
# by slice_tensor().  Insertion order is therefore load-bearing, and AGE must
# stay first (CVModule2.forward puts the age output in column 0).
# Each categorical width appears to be (num classes + 1) to cover the "-1 /
# unknown" label described in the ResNet50 comments — TODO confirm against
# the dataset labels.  Total width: 1+3+4+5+4+3+3+3 = 26.
lengths = {
    "AGE": 1,
    "GENDER": 3,
    "GLASSES": 4,
    "RACE": 5,
    "EMOTION": 4,
    "MASK": 3,
    "HAT": 3,
    "WHISKERS": 3,
}

def get_data(batch_size=128, num_workers=4):
    """Build the train/val datasets and their DataLoaders.

    Args:
        batch_size: samples per batch for both loaders.
        num_workers: DataLoader worker processes.  Previously hard-coded to 4,
            which silently ignored the --workers CLI flag; now a parameter
            with the same default, so existing callers are unaffected.

    Returns:
        (train_set, val_set, train_loader, val_loader)

    The dataset root depends on the `face_dev` environment variable so the
    same script runs on both the dev box and the training server.
    """
    if os.getenv("face_dev"):
        root = "/root/code/cvmark/face/data/2792"
    else:
        root = "/home/data/2792"
    train_set = CustomDataset(root)
    val_set = CustomDataset(root, mode="val")

    print(f"train_set: {len(train_set)}, val_set: {len(val_set)}")
    train_loader = DataLoader(
        train_set, batch_size=batch_size, shuffle=True, num_workers=num_workers
    )
    val_loader = DataLoader(
        val_set, batch_size=batch_size, shuffle=False, num_workers=num_workers
    )
    return train_set, val_set, train_loader, val_loader
### Model definition section
class ResNet50(nn.Module):
    """ResNet-50 backbone with a single 26-way linear head for face attributes.

    The 26 outputs are consumed via `slice_tensor` with the module-level
    `lengths` dict: column 0 is a scalar age regression (age was moved from a
    102-way classification to one neuron trained with MSE), and the remaining
    25 columns are the logits of the 7 categorical attributes — gender 3,
    glasses 4, race 5, emotion 4, mask 3, hat 3, whiskers 3 — each width
    including an extra class for the "-1 / cannot tell" label.
    """

    def __init__(self, weight="./resnet.pth"):
        """Args:
            weight: optional path to pretrained ResNet-50 weights; loaded
                only if the file exists.
        """
        super().__init__()
        self.backbone = resnet50(pretrained=False)
        if os.path.exists(weight):
            # map_location="cpu" so a checkpoint saved on GPU also loads on a
            # CPU-only machine; the module is moved to the right device later.
            self.backbone.load_state_dict(torch.load(weight, map_location="cpu"))
        # Drop the ImageNet classification head; keep the 2048-d pooled features.
        self.backbone.fc = nn.Identity()
        last_channels = 2048
        # 1 (age neuron) + 25 (categorical logits) = 26 total outputs.
        self.classifier = nn.Linear(last_channels, 26)

    def forward(self, imgs):
        """Return the (B, 26) concatenated attribute predictions for `imgs`."""
        x = self.backbone(imgs)
        return self.classifier(x)
class MyCrossEntropy(nn.CrossEntropyLoss):
    """Cross-entropy that strongly down-weights class 0.

    The weight vector is all ones except weights[0] = 0.01; it is built
    lazily from the first batch (so its length matches the number of
    classes) and then cached.
    """

    def __init__(self):
        super().__init__()
        # Lazily-initialized per-class weight vector; None until first forward.
        self.weights = None

    def forward(self, output, target):
        # set the first elem to a small weight
        if self.weights is None:
            weights = torch.ones(output.shape[1], device=output.device)
            weights[0] = 0.01
            # BUG FIX: the original had a bare `self.weights` expression here
            # (a no-op), so the cache never took effect and the vector was
            # rebuilt on every call; actually store it.
            self.weights = weights
        # Keep the cached weights on the same device as the logits.
        weights = self.weights.to(output.device)
        return F.cross_entropy(output, target, weight=weights,
                               ignore_index=self.ignore_index, reduction=self.reduction,
                               label_smoothing=self.label_smoothing)

class AgeRegressor(nn.Module):
    """Single-neuron age regression head.

    Rounds to whole years only in eval mode: `torch.round` has zero gradient
    everywhere, so rounding during training (as the original code did)
    silently blocked all gradient flow into this head and the backbone via
    the age loss.
    """

    def __init__(self, mid_feat) -> None:
        super().__init__()
        self.fc = nn.Linear(mid_feat, 1)

    def forward(self, x):
        x = self.fc(x)
        if not self.training:
            # Integer ages at inference; keep the raw, differentiable output
            # for the MSE loss during training.
            x = torch.round(x)
        return x

class CVModule2(L.LightningModule):
    """Multi-task face-attribute model: shared backbone + two heads.

    The backbone maps an image to a `mid_feat`-dim feature vector; an
    AgeRegressor head predicts age (1 output, MSE loss scaled by lambda_age)
    and one linear layer emits the 25 categorical logits, sliced per
    attribute with the module-level `lengths` dict (cross-entropy each,
    GENDER scaled by lambda_gender).
    """

    def __init__(self, **kwargs) -> None:
        """All kwargs are persisted to self.hparams; the ones read here are
        `backbone` ("fasternet" | "resnet") and `pretrained_weights`
        (optional checkpoint path for the fasternet backbone)."""
        super().__init__()

        self.save_hyperparameters()
        mid_feat = 256  # feature width fed to both heads
        backbone = self.hparams.backbone
        if backbone == "resnet":
            model = ResNet50()
            # FIX: the original left `mid_feat` undefined on this path (it was
            # only assigned inside the fasternet branch), so backbone="resnet"
            # raised a NameError below.  Repurpose the head as a projection
            # from the 2048-d features down to the shared feature width.
            model.classifier = nn.Linear(2048, mid_feat)
        elif backbone == "fasternet":
            model = fasternet.models.fasternet(**kwargs)
            weight_path = kwargs.get("pretrained_weights")
            # FIX: guard against a missing/None path; the original called
            # torch.load(None), which raises a TypeError that the
            # FileNotFoundError handler did not catch.
            if weight_path:
                try:
                    print(f"loading weight from {weight_path}")
                    model.load_state_dict(torch.load(weight_path))
                except FileNotFoundError:
                    print("the pretrained fasternet not found")
            # Swap the classification head for a feature projection block.
            model.head = nn.Sequential(
                nn.Linear(model.head.in_features, mid_feat),
                nn.BatchNorm1d(mid_feat),
                nn.ReLU(inplace=True),
                nn.Dropout(0.5),
            )
        else:
            raise ValueError(f"unsupported backbone: {backbone!r}")

        self.model = model
        self.age_regressor = AgeRegressor(mid_feat)
        # (sic) attribute name kept misspelled so existing checkpoints load.
        self.other_classfier = nn.Linear(mid_feat, 25)  # all non-age attributes
        self.loss = nn.CrossEntropyLoss()
        self.age_loss = nn.MSELoss()
        # Dummy input so Lightning can trace/log the model graph.
        self.example_input_array = torch.zeros((1, 3, 32, 32), dtype=torch.float32)

    def forward(self, imgs):
        """Return (B, 26) predictions: column 0 is the age, columns 1..25 the
        concatenated categorical logits (layout given by `lengths`)."""
        feats = self.model(imgs)
        age = self.age_regressor(feats)
        others = self.other_classfier(feats)
        return torch.cat([age, others], dim=1)

    def configure_optimizers(self):
        """AdamW with a linear-warmup + cosine-annealing schedule.

        FIX: the original passed `warmup_epochs=warmup_lr` (a learning rate,
        ~1e-4), so warmup effectively lasted zero epochs; warm up over 10% of
        training (at least one epoch) instead.
        """
        base_lr = self.hparams.lr
        warmup_lr = base_lr * 0.1
        min_lr = warmup_lr
        warmup_epochs = max(1, self.hparams.epochs // 10)
        optimizer = optim.AdamW(self.parameters(), lr=base_lr)
        scheduler = LinearWarmupCosineAnnealingLR(
            optimizer,
            warmup_epochs=warmup_epochs,
            max_epochs=self.hparams.epochs,
            warmup_start_lr=warmup_lr,
            eta_min=min_lr,
        )
        self.optimizer = optimizer  # kept so the epoch hooks can log the lr
        return [optimizer], [scheduler]

    def acc(self, preds, labels):
        """Return (batch accuracy, per-sample boolean correctness mask)."""
        batch_size = preds.shape[0]
        hard_preds = torch.argmax(preds, dim=1)
        hits = hard_preds == labels
        return hits.sum().item() / batch_size, hits

    def compute_loss(self, preds, labels):
        """Sum the per-attribute losses over the sliced predictions.

        `labels` is (B, 8), one column per entry of `lengths` in insertion
        order.  AGE uses MSE on the scalar output scaled by lambda_age;
        GENDER's cross-entropy is scaled by lambda_gender; the remaining
        attributes contribute unweighted cross-entropy.
        """
        slices = slice_tensor(preds, lengths)
        loss = 0
        for i, (name, part) in enumerate(slices.items()):
            if name == "AGE":
                loss += self.hparams.lambda_age * self.age_loss(
                    part.flatten(), labels[:, i].float()
                )
            elif name == "GENDER":
                loss += self.hparams.lambda_gender * self.loss(part, labels[:, i])
            else:
                loss += self.loss(part, labels[:, i])
        return loss

    def training_step(self, batch, batch_idx):
        """One optimization step; the epoch-mean loss is logged in the hook."""
        imgs, labels = batch
        preds = self(imgs)
        loss = self.compute_loss(preds, labels)
        self.train_loss_avg.update(loss.item())
        return loss

    def validation_step(self, batch, batch_idx):
        """Accumulate loss plus per-attribute and all-correct counts."""
        imgs, labels = batch
        preds = self(imgs)
        loss = self.compute_loss(preds, labels)
        self.valid_datalens += labels.shape[0]
        slices = slice_tensor(preds, lengths)
        corrects = torch.ones(imgs.shape[0], dtype=bool).to(labels.device)
        for i, (name, part) in enumerate(slices.items()):
            # NOTE(review): for AGE the slice has width 1, so argmax is always
            # 0 and "acc_AGE" just counts labels equal to 0 — consider an
            # exact-match on the rounded age instead.
            _, correct = self.acc(part, labels[:, i])
            self.correct_dict[name] += correct.sum()
            corrects = corrects & correct
        self.correct_overall += corrects.sum()
        self.val_loss_avg.update(loss.item())

    def on_train_epoch_end(self) -> None:
        self.log("train/loss", self.train_loss_avg.avg, on_step=False, on_epoch=True)
        print("\ntrain_loss is ", self.train_loss_avg.avg)

    def on_train_epoch_start(self) -> None:
        # Fresh meters every epoch (created here rather than in __init__ so
        # resumed runs also start from zero).
        self.train_loss_avg = AvgMeter()
        self.train_age_loss_avg = AvgMeter()
        self.log("lr", self.optimizer.param_groups[0]["lr"], on_step=False)

    def on_validation_epoch_start(self) -> None:
        # Reset the per-epoch validation accumulators.
        self.valid_datalens = 0
        self.num_batch = 0
        self.val_loss_avg = AvgMeter()
        self.correct_dict = defaultdict(int)
        self.correct_overall = 0

    def on_validation_epoch_end(self) -> None:
        """Log per-attribute accuracy, all-attributes-correct rate, and loss."""
        for name, correct in self.correct_dict.items():
            self.log(f"val/acc_{name}", correct / self.valid_datalens, on_epoch=True)
        self.log(
            "val/acc_overall", self.correct_overall / self.valid_datalens, on_epoch=True
        )
        self.log("val/loss", self.val_loss_avg.avg, on_epoch=True)
        print("\nval_loss is ", self.val_loss_avg.avg)

    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        # No custom checkpoint state yet; defer to Lightning's default.
        return super().on_save_checkpoint(checkpoint)

    @staticmethod
    def prepare_picture(img):
        """Load an image (path or file object) into a normalized
        (1, 3, H, W) float tensor ready for `forward`."""
        transform = transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                ),
            ]
        )
        img = Image.open(img).convert("RGB")
        img = transform(img)
        return img.unsqueeze(0)
class CVModule(L.LightningModule):
    """Legacy multi-task face-attribute model (superseded by CVModule2).

    A single 127-wide linear head on the backbone is sliced per attribute via
    the module-level `lengths` dict.
    NOTE(review): `lengths` now sums to 26 (AGE width 1), so only the first
    26 of the 127 head outputs are ever trained or read — apparently a
    leftover from when AGE was a 102-way classification (see the
    "102 + 3 + ..." comment near ResNet50).  Confirm before reusing.
    """
    def __init__(self, **kwargs) -> None:
        super().__init__()

        # Store every constructor kwarg on self.hparams (and in checkpoints).
        self.save_hyperparameters()
        backbone = self.hparams.backbone
        if backbone == "resnet":
            model = ResNet50()
        elif backbone == "fasternet":

            model = fasternet.models.fasternet(**kwargs)
            # backbone.load_state_dict(torch.load(args.backbone_path))
            try:
                print(f'loading weight from {kwargs["pretrained_weights"]}')
                model.load_state_dict(
                    torch.load(
                        kwargs['pretrained_weights']
                    )
                )
            except FileNotFoundError:
                print("the pretrained fasternet not found")

            # 127 = 102 (legacy age classes incl. -1) + 25 categorical logits.
            model.head = nn.Linear(model.head.in_features, 127)
        self.model = model
        self.loss = nn.CrossEntropyLoss()
        self.age_loss = nn.MSELoss()
        # Dummy input so Lightning can trace/log the model graph.
        self.example_input_array = torch.zeros((1, 3, 32, 32), dtype=torch.float32)


    def forward(self, imgs):
        # Raw (B, 127) head output; callers slice it with `lengths`.
        return self.model(imgs)

    def configure_optimizers(self):
        # NOTE(review): `warmup_epochs` receives `warmup_lr` (a learning
        # rate, ~1e-4) rather than an epoch count, so warmup effectively
        # lasts less than one epoch — this looks like a copy/paste bug.
        base_lr = self.hparams.lr
        warmup_lr = base_lr * 0.1
        min_lr = warmup_lr
        optimizer = optim.AdamW(self.parameters(), lr=base_lr)
        scheduler = LinearWarmupCosineAnnealingLR(
            optimizer,
            warmup_epochs=warmup_lr,
            max_epochs=self.hparams.epochs,
            warmup_start_lr=warmup_lr,
            eta_min=min_lr,
        )
        # Kept so the epoch hooks below can log the current lr.
        self.optimizer = optimizer

        return [optimizer], [scheduler]

    def acc(self, preds, labels):
        """Return (batch accuracy, per-sample boolean correctness mask)."""
        batch_size = preds.shape[0]
        preds = torch.argmax(preds, dim=1)
        correct = (preds == labels).sum().item()
        acc = correct / batch_size
        return acc, preds == labels

    def training_step(self, batch, batch_idx):
        """Per-attribute cross-entropy plus (scaled) MSE on the rounded age."""
        imgs, labels = batch
        preds = self.model(imgs)
        slices = slice_tensor(preds, lengths)
        loss = 0
        # acc_dict = dict()
        # corrects = torch.ones(imgs.shape[0], dtype=bool).to(labels.device)
        for i, (name, slice) in enumerate(slices.items()):
            if not name == "AGE":
                loss += self.loss(slice, labels[:, i])
            else:
                # NOTE(review): torch.round has zero gradient everywhere, so
                # this age term contributes no gradient to the network.
                age = torch.round(slice)
                age_loss= (
                    self.age_loss(age.flatten(), labels[:, i].float()) * self.hparams.lambda_age
                )
                loss+=age_loss
        #     acc_dict[name], correct = self.acc(slice, labels[:, i])
        #     corrects = corrects & correct
        # acc_overall = corrects.sum().float() / corrects.shape[0]
        # loss = 0.5 * self.loss(preds[0], labels[0]) + 0.5 * self.loss(preds[1], labels[1])### 0.5*sex_loss + 0.5*age_loss
        # acc = 0.5 * (preds[0].argmax(dim=-1) == labels[0]).float().mean() + 0.5 * (preds[1].argmax(dim=-1) == labels[1]).float().mean()
        # for name, acc in acc_dict.items():
        #     self.log(f'train_acc_{name}', acc, on_step=True)
        # self.log('train_acc_overall', acc_overall, on_step=True)
        # self.log("train_loss", loss, on_step=True)
        self.train_loss_avg.update(loss.item())
        self.train_age_loss_avg.update(age_loss.item())
        # self.log("lr", self.optimizer.param_groups[0]["lr"], on_step=True)
        return loss

    def validation_step(self, batch, batch_idx):
        """Accumulate loss and correctness counters for the epoch hooks."""
        imgs, labels = batch
        preds = self.model(imgs)
        self.valid_datalens += labels.shape[0]
        self.num_batch += 1
        slices = slice_tensor(preds, lengths)
        loss = 0
        corrects = torch.ones(imgs.shape[0], dtype=bool).to(labels.device)
        for i, (name, slice) in enumerate(slices.items()):
            if not name == "AGE":
                loss += self.loss(slice, labels[:, i])
                _, correct = self.acc(slice, labels[:, i])
            else:
                age = torch.round(slice).flatten()
                loss += self.age_loss(age, labels[:, i].float()) * self.hparams.lambda_age
                # NOTE(review): here `correct` is a 0-dim count from
                # torch.sum, not a per-sample mask, so the `&` below appears
                # to mix a scalar into the batch mask — acc_overall is likely
                # skewed by this branch; verify intent.
                correct = torch.sum((age == labels[:, i].float()))
            self.correct_dict[name] += correct.sum()
            corrects = corrects & correct
        self.correct_overall += corrects.sum()
        self.val_loss += loss.item()
        # loss = 0.5 * self.loss(preds[0], labels[0]) + 0.5 * self.loss(preds[1], labels[1])### 0.5*sex_loss + 0.5*age_loss
        # acc = 0.5 * (preds[0].argmax(dim=-1) == labels[0]).float().mean() + 0.5 * (preds[1].argmax(dim=-1) == labels[1]).float().mean()
        # for name, acc in acc_dict.items():
        #     self.log(f'val_acc_{name}', acc, on_step=True)
        # self.log('val_acc_overall', acc_overall, on_step=True)
        # self.log("val_loss", loss, on_step=True)
        # return loss

    def on_train_epoch_end(self) -> None:
        # Epoch-mean training loss (and the age component) from the meters.
        self.log("train/loss", self.train_loss_avg.avg, on_step=False, on_epoch=True)
        self.log("train/loss_age", self.train_age_loss_avg.avg, on_epoch=True)

    def on_train_epoch_start(self) -> None:
        # Fresh meters every epoch.
        self.train_loss_avg = AvgMeter()
        self.train_age_loss_avg = AvgMeter()
        self.log("lr", self.optimizer.param_groups[0]["lr"], on_step=False)

    def on_validation_epoch_start(self) -> None:
        # Reset the per-epoch validation accumulators.
        self.valid_datalens = 0
        self.num_batch = 0
        self.val_loss = 0
        self.correct_dict = defaultdict(int)
        self.correct_overall = 0

    def on_validation_epoch_end(self) -> None:
        """Log per-attribute accuracy, all-correct rate, and mean loss."""
        # self.train_loss_avg = self.train_loss_avg / self.num_batch
        # self.log('train_loss_avg', self.train_loss_avg, on_epoch=True)
        # self.train_loss_avg = 0
        # print('lens of valid dataset is ', self.valid_datalens)
        self.val_loss = self.val_loss / self.num_batch
        for name, correct in self.correct_dict.items():
            self.log(f"val_acc_{name}", correct / self.valid_datalens, on_epoch=True)
        self.log(
            "val_acc_overall", self.correct_overall / self.valid_datalens, on_epoch=True
        )
        self.log("val_loss", self.val_loss, on_epoch=True)

    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        # No custom checkpoint state; defer to Lightning's default.
        return super().on_save_checkpoint(checkpoint)

    @staticmethod
    def prepare_picture(img):
        """Load an image (path or file object) into a normalized
        (1, 3, H, W) float tensor ready for `forward`."""
        transform = transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                ),
            ]
        )
        img = Image.open(img).convert("RGB")
        img = transform(img)
        return img.unsqueeze(0)
