from typing import Any
from torch import nn
from torch import optim
from torchvision import models
import lightning as L
# import pytorch_lightning as pl
from lightning import pytorch as pl
from collections import defaultdict
import torch
import numpy as np
from tlhengine.utils import AvgMeter
from tlhengine import utils as tutils
from functools import partial
from tlhengine.models import get_model
import torchvision as tv
from typing import Optional, Union, Dict
from copy import deepcopy
from overrides import overrides
from pytorch_lightning.utilities import rank_zero_only

from argparse import Namespace
        
        
class MyResnet(nn.Module):
    """ResNet-50 feature extractor paired with a fresh linear classifier.

    The torchvision head (``fc``) is replaced by ``nn.Identity`` so
    ``self.features`` yields pooled embeddings; ``self.classifier`` maps
    them to ``out_dim`` logits.
    """

    def __init__(self, out_dim=5):
        super().__init__()
        net = tv.models.resnet50(weights='DEFAULT')
        # Register the backbone first (keeps child/parameter order),
        # then read the embedding width before detaching the original head.
        self.features = net
        self.feat_dim = net.fc.in_features
        self.classifier = nn.Linear(self.feat_dim, out_dim)
        net.fc = nn.Identity()

    def forward(self, x):
        feats = self.features(x)
        return self.classifier(feats)

class GN32(nn.GroupNorm):
    """GroupNorm fixed at 32 groups.

    Takes the channel count as the first argument so it can be used as a
    drop-in norm-layer factory wherever ``nn.BatchNorm1d`` is expected
    (e.g. the ``bn`` argument of ``BasicBlock``).
    """

    def __init__(self, num_channels, num_groups=32):
        # nn.GroupNorm's signature is (num_groups, num_channels) — swap back.
        super().__init__(num_groups, num_channels)

class BasicBlock(nn.Module):
    """Linear -> norm -> ReLU -> (optional) Dropout block for an MLP head.

    Args:
        in_c: input feature dimension.
        out_c: output feature dimension.
        bn: norm-layer factory invoked as ``bn(out_c)``
            (e.g. ``nn.BatchNorm1d`` or ``GN32``).
        dropout_rate: dropout probability; values <= 0 disable dropout.
    """

    def __init__(self, in_c, out_c, bn=nn.BatchNorm1d, dropout_rate=0.5):
        super().__init__()
        self.fc = nn.Linear(in_c, out_c)
        self.bn1 = bn(out_c)
        self.relu = nn.ReLU()
        # Always define the attribute so forward() needs no hasattr() check.
        # nn.Identity adds no parameters, so the state_dict is unchanged
        # relative to not registering a dropout module at all.
        self.dropout = nn.Dropout(dropout_rate) if dropout_rate > 0 else nn.Identity()

    def forward(self, x):
        x = self.fc(x)
        x = self.bn1(x)
        x = self.relu(x)
        return self.dropout(x)


class Model(L.LightningModule):
    """Multi-label image classifier: ResNet-50 backbone + MLP head.

    Each class gets an independent sigmoid/BCE output (multi-label, not
    softmax). Expected hparams (passed as **kwargs and saved via
    ``save_hyperparameters``): ``frozen``, ``optimizer``, ``lr``,
    ``weight_decay``, ``epochs``.
    """

    def __init__(self, **kwargs):
        super().__init__()
        self.save_hyperparameters()

        # Class index -> human-readable label; the labels are used verbatim
        # as metric-name suffixes in validation logging.
        self.classes = {
            0: '抽烟',
            1: '赤膊',
            2: '老鼠',
            3: '猫',
            4: '狗',
        }
        num_classes = len(self.classes)

        backbone = MyResnet(out_dim=num_classes)
        self.backbone = backbone.features
        feat_size = backbone.feat_dim

        # Three 512-wide hidden layers (GroupNorm + dropout) followed by the
        # classification layer — one logit per class.
        head_cfg = [
            {'in_c': feat_size, 'out_c': 512, 'bn': GN32, 'dropout_rate': 0.5},
            {'in_c': 512, 'out_c': 512, 'bn': GN32, 'dropout_rate': 0.5},
            {'in_c': 512, 'out_c': 512, 'bn': GN32, 'dropout_rate': 0.5},
        ]
        self.head = nn.Sequential(
            *[BasicBlock(**cfg) for cfg in head_cfg],
            # Width derived from self.classes instead of hard-coded, so
            # adding a class only requires editing the dict above.
            nn.Linear(512, num_classes),
        )

        self.criterion = nn.BCEWithLogitsLoss()

        # Optionally freeze the backbone so only the head is trained.
        if kwargs.get('frozen', False):
            for param in self.backbone.parameters():
                param.requires_grad = False

    def forward(self, x):
        x = self.backbone(x)
        x = self.head(x)
        return x

    def configure_optimizers(self):
        """Build the optimizer (named by hparam) and a warmup+cosine schedule.

        Norm-layer weights get zero weight decay via the project helper.
        """
        parameters = tutils.set_weight_decay(
            self, weight_decay=self.hparams.weight_decay, norm_weight_decay=0)
        # Resolve the optimizer class by name, e.g. 'AdamW' -> optim.AdamW.
        optimizer = getattr(optim, self.hparams.optimizer)(parameters, lr=self.hparams.lr)
        # Kept on self so on_train_epoch_start can log the current LR.
        self.optimizer = optimizer

        lr_scheduler = tutils.LinearWarmupCosineAnnealingLR(
            optimizer, warmup_epochs=5, max_epochs=self.hparams.epochs,
            warmup_start_factor=0.1)
        return [optimizer], [lr_scheduler]

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_pred = self(x)
        loss = self.compute_loss(y_pred, y)
        self.train_loss.update(loss.item())
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        self.total_val_samples += y.shape[0]
        y_pred = self(x)
        loss = self.compute_loss(y_pred, y)
        self.val_loss.update(loss.item())
        self.correct(y_pred, y)

    def compute_loss(self, y_pred, y):
        """Sum of per-class BCE-with-logits losses.

        Summing per-column means (rather than one call on the full tensor)
        keeps the loss proportional to the number of classes.
        """
        loss = 0
        for i in range(y_pred.shape[1]):
            loss += self.criterion(y_pred[:, i], y[:, i])
        return loss

    def correct(self, y_pred, y):
        """Accumulate per-class and all-classes-correct counts for the epoch.

        Assumes ``y`` holds 0/1 targets per class — TODO confirm against the
        dataloader.
        """
        y_pred = torch.sigmoid(y_pred)
        correct_for_all = None
        for k, v in self.classes.items():
            class_correct = (y_pred[:, k] > 0.5) == y[:, k]
            if correct_for_all is None:
                correct_for_all = class_correct.clone()
            else:
                # A sample counts as fully correct only if every class is.
                correct_for_all &= class_correct
            self.correct_dict[v] += class_correct.sum()
        self.correct_dict['all'] += correct_for_all.sum()

    def on_train_epoch_start(self) -> None:
        self.train_loss = AvgMeter()
        # self.optimizer is set in configure_optimizers, which runs first.
        self.log('train/lr', self.optimizer.param_groups[0]['lr'])

    def on_train_epoch_end(self) -> None:
        self.log('train/loss', self.train_loss.avg)

    def on_validation_epoch_start(self) -> None:
        # Reset per-epoch accumulators consumed by validation_step/correct.
        self.total_val_samples = 0
        self.correct_dict = defaultdict(int)
        self.val_loss = AvgMeter()

    def on_validation_epoch_end(self) -> None:
        self.acc_dict = dict()
        self.log('val/loss', self.val_loss.avg)
        for k, v in self.correct_dict.items():
            self.acc_dict[k] = v / self.total_val_samples
            self.log(f'val_acc/{k}', self.acc_dict[k])
            print(f'val_acc/{k}: ', self.acc_dict[k].item())

            