import torch
from torch import nn
from torchvision import transforms as T
from torchvision.datasets import MNIST
from torch.utils.data import DataLoader, random_split
import pytorch_lightning as pl
from torchmetrics import Accuracy
from torchmetrics.functional import accuracy,precision,recall,f1_score
from config import  config

class Backbone(torch.nn.Module):
    """Small CNN for single-channel images (e.g. 28x28 MNIST digits).

    Two conv/max-pool stages, 2-D dropout, a global adaptive max-pool down to
    1x1, then a two-layer fully-connected head producing 10 class logits.
    """

    def __init__(self):
        super().__init__()
        # Feature extractor: 1 -> 32 -> 64 channels.
        self.l1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3)
        self.l2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.l3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5)
        self.l4 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.l5 = nn.Dropout2d(p=config.dropout_p)
        # Collapse any remaining spatial extent to 1x1 so the head's input
        # size (64) is independent of the input image size.
        self.l6 = nn.AdaptiveMaxPool2d((1, 1))
        # Classifier head: 64 -> 32 -> 10 logits.
        self.l7 = nn.Flatten()
        self.l8 = nn.Linear(64, 32)
        self.l9 = nn.ReLU()
        self.l10 = nn.Linear(32, 10)

    def forward(self, x):
        """Apply the layers in registration order; returns (batch, 10) logits."""
        for layer in (self.l1, self.l2, self.l3, self.l4, self.l5,
                      self.l6, self.l7, self.l8, self.l9, self.l10):
            x = layer(x)
        return x


class Model(pl.LightningModule):
    """LightningModule wrapping a classification network (e.g. ``Backbone``).

    The wrapped ``net`` is expected to map a batch of inputs to raw class
    logits of shape (batch, num_classes). Cross-entropy loss and accuracy are
    logged for train/val/test; F1, precision and recall additionally on train.
    """

    def __init__(self, net, num_classes=10):
        """
        Args:
            net: any ``nn.Module`` producing class logits.
            num_classes: number of target classes (default 10 for MNIST).
        """
        super().__init__()
        self.net = net
        self.num_classes = num_classes
        self.loss = nn.CrossEntropyLoss()
        # Bug fix: the constructor argument is "net", not "backbone" — the
        # old ignore=["backbone"] matched nothing, so the entire nn.Module
        # was serialized into the checkpoint's hyperparameters.
        self.save_hyperparameters(ignore=["net"])

    def forward(self, x):
        return self.net(x)

    def training_step(self, batch, batch_idx):
        # Single forward pass: the original called two helpers that each ran
        # the net on the same batch, doubling the training compute per step.
        x, y = batch
        logits = self(x)
        loss = self.loss(logits, y)
        preds = torch.argmax(logits, dim=1)
        acc = accuracy(task="multiclass", num_classes=self.num_classes, target=y, preds=preds)
        f1 = f1_score(task="multiclass", num_classes=self.num_classes, target=y, preds=preds)
        prec = precision(task="multiclass", num_classes=self.num_classes, target=y, preds=preds)
        rec = recall(task="multiclass", num_classes=self.num_classes, target=y, preds=preds)
        # Log loss and metrics (same names/flags as before).
        self.log('train_loss', loss, on_step=True, on_epoch=False)
        self.log('train_accuracy', acc, on_step=False, on_epoch=True)
        self.log('f1', f1, on_step=True, on_epoch=False)
        self.log('precision', prec, on_step=True, on_epoch=False)
        self.log('recall', rec, on_step=True, on_epoch=False)
        return {"loss": loss, "preds": preds}

    def validation_step(self, batch, batch_idx):
        preds, loss, acc = self._get_preds_loss_accuracy(batch)
        # Log loss and metric
        self.log('val_loss', loss, on_step=False, on_epoch=True)
        self.log('val_accuracy', acc, on_step=False, on_epoch=True)
        return {"loss": loss, "preds": preds}

    def test_step(self, batch, batch_idx):
        preds, loss, acc = self._get_preds_loss_accuracy(batch)
        # Log loss and metric
        self.log('test_loss', loss, on_step=False, on_epoch=True)
        self.log('test_accuracy', acc, on_step=False, on_epoch=True)
        return {"loss": loss, "preds": preds}

    # Define the optimizer (no LR scheduler).
    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=0.02)

    def _get_preds_loss_accuracy(self, batch):
        """Convenience helper shared by val/test steps: one forward pass,
        returning (argmax predictions, cross-entropy loss, accuracy)."""
        x, y = batch
        logits = self(x)
        preds = torch.argmax(logits, dim=1)
        loss = self.loss(logits, y)
        acc = accuracy(task="multiclass", num_classes=self.num_classes, target=y, preds=preds)
        return preds, loss, acc

if __name__ == '__main__':

    # Bug fix: Model.__init__ requires a network argument — the original
    # `Model()` raised TypeError; wrap the CNN backbone explicitly.
    model = Model(Backbone())

    # Inspect model size (in megabytes).
    model_size = pl.utilities.memory.get_model_size_mb(model)
    print("model_size = {} M \n".format(model_size))
    # model.example_input_array = [features]
    summary = pl.utilities.model_summary.ModelSummary(model, max_depth=-1)
    print(summary)