import pytest  # NOTE(review): pytest is imported but unused in this script — confirm before removing
import os
import torch
from torch import optim, nn, utils
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor
import numpy as np
from ciallo_trainer.trainer import Trainer, TrainerConfig
from ciallo_trainer.module import Module, TensorDict, LogSystem, Float32, Tensor
from dataclasses import dataclass, field
import tyro


# Define a simple neural network
class Net(nn.Module):
    """MLP for flattened 28x28 images.

    ``fc1`` stacks three linear layers with no activations between them
    (so it collapses to a single linear map); a ReLU is applied only after
    the whole stack, before the 10-way output head ``fc2``.
    """

    def __init__(self):
        super(Net, self).__init__()
        hidden = 1024 * 16
        self.fc1 = nn.Sequential(
            nn.Linear(28 * 28, hidden),
            nn.Linear(hidden, hidden),
            nn.Linear(hidden, hidden),
        )
        self.fc2 = nn.Linear(hidden, 10)

    def forward(self, x):
        flat = x.view(-1, 28 * 28)
        activated = torch.relu(self.fc1(flat))
        return self.fc2(activated)
def spectral_norm(matrix: torch.Tensor, use_batch_size=False):
    """Return the spectral norm (largest singular value) of a 2D tensor.

    With ``use_batch_size=True`` the input is instead treated as a batch of
    1D vectors and the mean per-row Euclidean norm is returned. The result
    is always moved to CPU.
    """
    assert matrix.ndim == 2, "matrix should be a 2D tensor, or a batch of 1D tensors"
    if use_batch_size:
        per_row = matrix.norm(dim=1)
        return per_row.mean().cpu()
    # matrix_norm with ord=2 is exactly the largest singular value.
    return torch.linalg.matrix_norm(matrix, ord=2).cpu()
@dataclass
class ModuleConfig:
    """Hyper-parameters for the AutoEncoder MLP."""

    # Base learning rate handed to the optimizer.
    lr: float = 1e-1
    # Width of each hidden linear layer. Annotated list[int] rather than a
    # bare ``list`` so tyro's CLI parser and type checkers know the element
    # type; default_factory keeps the default per-instance, not shared.
    feature_dims: list[int] = field(default_factory=lambda: [512] * 5)
    # Optimizer name; configure_optimizers currently always builds SGD.
    optimizer: str = 'SGD'
class AutoEncoder(Module, nn.Module):
    """Lazily-built MLP classifier (despite the name) for 28x28 inputs.

    Hidden widths come from ``ModuleConfig.feature_dims``. Every linear layer
    is bias-free, so ``init_mlp`` only ever encounters ``*.weight`` parameters.
    """

    def __init__(self, config: ModuleConfig):
        # example_input_tensor is used to infer the shape of the input tensor,
        # which is helpful for deployment/export.
        super().__init__(example_input_tensor=torch.randn(1, 28 * 28))
        self.config = config

        self.layers = nn.Sequential()
        for i in range(len(config.feature_dims)):
            self.layers.add_module(f"linear{i}", nn.LazyLinear(config.feature_dims[i], bias=False))
            self.layers.add_module(f"relu{i}", nn.ReLU())
        self.layers.add_module("output", nn.LazyLinear(10, bias=False))

        # One dummy forward pass materializes every LazyLinear so that
        # named_parameters() / init_mlp() below see concrete weight shapes.
        self.forward(self.example_input_tensor)

        # Per-parameter learning-rate multipliers, filled in by init_mlp().
        self.lr_scaling_dict = {}

        # "xxx" is not a recognized scheme, so init_mlp() falls through to
        # its xavier-uniform default branch.
        self.init_type = "xxx"
        self.init_mlp()

    def forward(self, x: torch.Tensor):
        """Flatten to (B, 784) and run the MLP; returns (B, 10) logits."""
        x = x.view(-1, 28 * 28)
        return self.layers(x)

    def init_mlp(self):
        """Initialize weights and record per-layer lr scaling (fan_out/fan_in)."""
        constval = 1
        for name, param in self.named_parameters():
            fan_in, fan_out = torch.nn.init._calculate_fan_in_and_fan_out(param)

            if name.endswith("weight"):
                # Per-layer learning-rate scaling; see NOTE in
                # configure_optimizers about where this was meant to be used.
                self.lr_scaling_dict[name] = (fan_out / fan_in)
                if self.init_type == "orthogonal":
                    nn.init.orthogonal_(param.data)
                    sigma = constval * torch.sqrt(torch.tensor(fan_out / fan_in))
                elif self.init_type == "normal":
                    torch.nn.init.normal_(param.data)
                    # He-style rescaling of a standard-normal init.
                    # (A fan-in/fan-out formula was previously computed here
                    # and immediately overwritten; the dead assignment was
                    # removed.)
                    sigma = np.sqrt(2 / fan_in)
                else:
                    # Default branch: xavier uniform, no extra rescaling.
                    torch.nn.init.xavier_uniform_(param.data)
                    sigma = 1

                param.data *= sigma
            else:
                # All layers are created with bias=False, so only "*.weight"
                # parameters are expected here.
                raise ValueError("Unexpected parameter name {}".format(name))
            print(f"Initialized {name} with fan_in={fan_in}, fan_out={fan_out} and lr_scaling={self.lr_scaling_dict[name]}")

    def training_step(self, batch:TensorDict, batch_idx:int) -> Float32[Tensor, "B 1"]:
        """One training step: manual layer-by-layer forward pass with
        per-layer spectral-norm instrumentation, then cross-entropy loss."""
        x, target = batch
        x = x.view(-1, 28 * 28)

        # Normalize each sample to norm sqrt(D) so activations enter the
        # network at a consistent scale.
        x = torch.nn.functional.normalize(x, dim=1) * np.sqrt(x.size(1))
        for i in range(len(self.config.feature_dims)):
            w = getattr(self.layers, f"linear{i}")
            self.logger.log({f"Inspection/linear{i}_weight": spectral_norm(w.weight.data.detach())})
            x = w(x)
            self.logger.log({f"Inspection/linear{i}_pre_activation": spectral_norm(x.detach(), use_batch_size=True)})
            x = torch.relu(x)
            self.logger.log({f"Inspection/linear{i}_post_activation": spectral_norm(x.detach(), use_batch_size=True)})

        y = self.layers.output(x)
        loss = nn.functional.cross_entropy(y, target)

        # Just pass the gpu tensor to the logger; it logs asynchronously.
        self.logger.log({"Train/train_loss": loss})

        # NOTE(review): a second loop here re-logged the same
        # "Inspection/linear{i}_weight" spectral norms; the weights cannot
        # change between the loop above and this point, so the duplicate
        # (and its extra SVD per layer per step) was removed.
        return loss

    def configure_optimizers(self):
        """Plain SGD over all parameters plus a step-decay schedule.

        NOTE(review): a per-layer param-group SGD built from
        ``self.lr_scaling_dict`` was previously constructed here and then
        immediately discarded by a second ``torch.optim.SGD(...)``
        assignment. The dead construction was removed to keep the effective
        behavior (uniform lr); if per-layer lr scaling was the intent,
        restore it deliberately.
        """
        optimizer = torch.optim.SGD(self.parameters(), lr=self.config.lr)
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)
        return optimizer, scheduler

    def validation_step(self, batch:TensorDict, batch_idx:int) -> None:
        """Accumulate correct/total prediction counts for accuracy."""
        x, target = batch

        y = self(x)
        _, predicted = torch.max(y, 1)

        self.total += predicted.size(0)
        self.correct += (predicted == target).sum().item()

    def on_validation_start(self):
        # Reset the accuracy counters at the start of every validation run.
        self.total = 0
        self.correct = 0
        return super().on_validation_start()

    def on_validation_end(self):
        acc = self.correct / self.total
        self.logger.log({"Validation/accuracy": acc})
        return super().on_validation_end()
    
import torch
import os
from torch import nn
from torchvision import datasets, transforms

def main(
    model_config: ModuleConfig = ModuleConfig(lr=0.1),
    trainer_config: TrainerConfig = TrainerConfig(max_epochs=200, save_interval=500, use_amp=True, eval_interval=30),
    batch_size: int = 1024,
):
    """Download MNIST, build the model and trainer, and run training."""
    mnist_transform = transforms.Compose([transforms.ToTensor()])

    # Train/test splits are cached in the current working directory.
    train_set = MNIST(os.getcwd(), download=True, transform=mnist_transform)
    test_set = datasets.MNIST(os.getcwd(), train=False, download=True, transform=mnist_transform)

    train_loader = utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)
    test_loader = utils.data.DataLoader(test_set, batch_size=1024, shuffle=False)

    model = AutoEncoder(model_config)
    trainer = Trainer(trainer_config)
    trainer.train(train_loader, test_loader, model, check_exportable=False)

if __name__ == "__main__":
    tyro.cli(main)
