import numpy as np
import torch
from torch import nn
import lightning as L
from .switch import TransformerBlock
from networks.common.loss import Rating
from networks.common.normalization import RMSNorm
from utils.modules import clone_module_list

from dataclasses import dataclass
from typing import Optional
from networks.common.loss_pk import Rating, MRE
import torch.nn.functional as F
from utils import register_class


@dataclass
class ModelArgs:
    """Hyper-parameters for the MoE transformer solver.

    Consistency fix: the old default ``act='relu'`` always failed the
    solver's activation check (it only accepts ``'SwiGLU'``), so
    ``'SwiGLU'`` is now the default. Passing ``act`` explicitly behaves
    exactly as before.
    """
    input_dim: int = 3        # per-node input feature size; first 3 channels are coordinates
    output_dim: int = 1       # per-node prediction size
    width: int = 256          # model / embedding dimension
    n_heads: int = 8          # attention heads per transformer block
    n_layers: int = 8         # number of stacked transformer blocks
    n_experts: int = 8        # experts per MoE feed-forward layer
    act: str = 'SwiGLU'       # the solver only supports 'SwiGLU'
    base: int = 10000         # rotary-embedding frequency base
    normalization: str = 'layerNorm'  # optional ['layerNorm', 'RMSnorm']
    attn_type: str = 'l1'  # optional ['l1', 'galerkin', 'l2']
    norm_eps: float = 1e-5    # epsilon for RMSNorm
    moe_capacity_factor: float = 1.5  # expert capacity multiplier for routing
    # `dropout_prob` is the probability of dropping out after self attention and FFN;
    # None disables dropout.
    dropout_prob: Optional[float] = None
    lr: float = 1e-4          # initial learning rate for AdamW

@register_class(name=['solver'])
class solver(L.LightningModule):
    """Mixture-of-experts transformer solver (LightningModule).

    Data path in :meth:`forward`: two-layer MLP encoder -> ``n_layers``
    MoE ``TransformerBlock``s (each also returns its routing statistics)
    -> final normalization -> two-layer MLP decoder.

    The first three input channels are treated as node coordinates and
    turned into rotary-style cos/sin tables via :meth:`get_cos_sin_item`.
    """

    def __init__(self,
                 normalizer_y=None,
                 normalizer_x=None,
                 use_wandb=False,
                 **network_args,
                 ) -> None:
        """Construct the network.

        Args:
            normalizer_y: optional label normalizer exposing ``encode``/``decode``;
                only stored when truthy.
            normalizer_x: optional feature normalizer exposing ``encode``;
                only stored when truthy.
            use_wandb: accepted for interface compatibility; unused here.
            **network_args: keyword overrides forwarded to :class:`ModelArgs`.

        Raises:
            TypeError: if ``network_args`` contains an unknown field.
            ValueError: if ``act`` is not ``'SwiGLU'``.
            NotImplementedError: for an unknown ``normalization`` choice.
        """
        super().__init__()
        self.__name__ = 'solver'
        # Normalizers may carry tensors/state; keep them out of the saved hparams.
        self.save_hyperparameters(ignore=['normalizer_x', 'normalizer_y'])

        self.params = ModelArgs(**network_args)
        print('=' * 20, 'Parameters', '=' * 20)
        print(self.params)
        self.lr = self.params.lr
        print('=' * 50)

        # Explicit raise (not `assert`) so the check survives `python -O`.
        if self.params.act != 'SwiGLU':
            raise ValueError(
                "Current model only support SwiGLU activation function")

        self.base = self.params.base
        self.width = self.params.width

        if normalizer_y:
            self.normalizer_y = normalizer_y
        if normalizer_x:
            self.normalizer_x = normalizer_x

        self.loss = MRE()
        self.rating = Rating()

        # Pointwise encoder / decoder MLPs; SiLU is applied between the two
        # linear layers inside forward().
        self.encoder_l1 = nn.Linear(self.params.input_dim, self.params.width)
        self.encoder_l2 = nn.Linear(self.params.width, self.params.width)

        self.decoder_l1 = nn.Linear(self.params.width, self.params.width)
        self.decoder_l2 = nn.Linear(self.params.width, self.params.output_dim)

        if self.params.normalization == 'layerNorm':
            # Bug fix: the original passed `[self.d]`, an attribute that was
            # never defined (AttributeError on the default config). The
            # normalized feature dimension is the model width.
            self.norm = nn.LayerNorm(self.params.width)
        elif self.params.normalization == 'RMSnorm':
            self.norm = RMSNorm(self.params.width, eps=self.params.norm_eps)
        else:
            raise NotImplementedError

        # One template layer, deep-copied n_layers times so every block has
        # independent weights.
        base_layer = TransformerBlock(args=self.params)
        self.transformerBlocks = clone_module_list(base_layer, self.params.n_layers)

    def get_cos_sin_item(self, x: torch.Tensor) -> torch.Tensor:
        """Build rotary cos/sin tables from 3-D node coordinates.

        Args:
            x: coordinate tensor of shape ``(batch, seq, 3)``.

        Returns:
            Tensor of shape ``(6, batch, seq, width)``: cos tables for the
            three coordinate axes stacked first, then the three sin tables.

        Raises:
            ValueError: if the last dimension of ``x`` is not 3.
        """
        if x.shape[-1] != 3:
            raise ValueError(
                "get_cos_sin_item only work on the dimension of the nodes")

        # Inverse frequencies: theta_j = base^(-2j/width), j = 0..width/2-1.
        theta = 1. / (self.base ** (torch.arange(0, self.width, 2).float() / self.width)).to(x.device)

        # Per-axis phases: each (batch, seq) coordinate times every frequency.
        phases = [
            torch.einsum('bsi,id->bsd', x[..., i][..., None], theta[None])
            for i in range(x.shape[-1])
        ]
        # Duplicate along the feature dim so each table spans the full width.
        phases = [torch.cat([p, p], dim=-1) for p in phases]

        cos_item = torch.cat([p.cos()[None] for p in phases], dim=0)
        sin_item = torch.cat([p.sin()[None] for p in phases], dim=0)
        return torch.cat([cos_item, sin_item], dim=0)

    def forward(self, x):
        """Run the network.

        Args:
            x: input of shape ``(batch, seq, input_dim)``; channels 0..2
                are node coordinates used for the positional tables.

        Returns:
            Tuple ``(z, counts, route_prob, n_dropped, route_prob_max,
            routes_max)`` where ``z`` is ``(batch, seq, output_dim)`` and
            the routing statistics come from the LAST transformer block
            only (intermediate layers' stats are collected then discarded).
        """
        cos_sin_item = self.get_cos_sin_item(x[..., :3])

        z = self.encoder_l2(F.silu(self.encoder_l1(x)))

        counts, route_prob, n_dropped, route_prob_max, routes_max = [], [], [], [], []
        for layer in self.transformerBlocks:
            z, f, p, n_d, p_max, routes = layer(x=z, cos_sin_item=cos_sin_item)
            counts.append(f)
            route_prob.append(p)
            n_dropped.append(n_d)
            route_prob_max.append(p_max)
            routes_max.append(routes)

        # Finally, normalize the vectors before decoding.
        z = self.norm(z)

        z = self.decoder_l2(F.silu(self.decoder_l1(z)))

        return z, counts[-1], route_prob[-1], n_dropped[-1], route_prob_max[-1], routes_max[-1]

    def training_step(self, batch, batch_idx):
        """One optimization step: encode features, predict, decode, and
        score against the RAW (un-normalized) labels with MRE."""
        feature, label = batch
        feature_nor = self.normalizer_x.encode(feature)

        z, _, _, _, _, _ = self(feature_nor)
        # Predictions come out in normalized space; decode before scoring.
        pred = self.normalizer_y.decode(z)
        loss = self.loss(pred, label)

        self.log('train_loss', loss, on_step=False, on_epoch=True,
                 prog_bar=True, logger=True, sync_dist=True)
        return loss

    def validation_step(self, batch, batch_idx):
        """Validate sample-by-sample and log the mean MRE over the batch."""
        feature, label = batch
        feature_nor = self.normalizer_x.encode(feature)

        # One sample at a time: keeps peak memory low at the cost of speed.
        losses = []
        for i in range(feature_nor.shape[0]):
            pre, _, _, _, _, _ = self(feature_nor[i].unsqueeze(0))
            pred = self.normalizer_y.decode(pre)
            losses.append(self.loss(pred, label[i].unsqueeze(0)).item())

        loss = float(np.mean(losses))

        self.log('val_loss', loss, on_step=False, on_epoch=True,
                 prog_bar=True, logger=True, sync_dist=True)

        return {'val_loss': loss}

    def predict_step(self, batch, batch_idx):
        """Forward the raw batch and return the full output tuple.

        NOTE(review): unlike train/val steps, no feature normalization is
        applied here — confirm callers pre-normalize ``batch``.
        """
        return self(batch)

    def configure_optimizers(self):
        """AdamW plus a step schedule that halves the LR every 20 epochs."""
        # Set up the optimizer.
        optimizer = torch.optim.AdamW(self.parameters(), lr=self.lr,
                                      weight_decay=1e-6)
        # Set up the learning-rate scheduler.
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20,
                                                    gamma=0.5, last_epoch=-1)
        return {
            'optimizer': optimizer,
            'lr_scheduler': {
                'scheduler': scheduler,
                'interval': 'epoch',  # or 'step' depending on when you want to update the learning rate
                'frequency': 1
            }
        }