import torch
import torch.nn as nn

class WrappedGPT:
    """Accumulates per-input-feature activation statistics for a linear layer.

    ``scaler_row[j]`` holds the running mean (over all samples seen so far) of
    the squared activation of input feature ``j``.  Feed activations in via
    :meth:`add_batch`; read the normalized result via :attr:`stats`.
    """

    def __init__(self, layer, layer_id=0, layer_name="none"):
        """Wrap *layer* (must expose a 2-D ``weight`` of shape [out, in]).

        Args:
            layer: module with a ``weight`` tensor, e.g. ``nn.Linear``.
            layer_id: optional integer tag for bookkeeping.
            layer_name: optional string tag for bookkeeping.
        """
        self.layer = layer
        self.dev = self.layer.weight.device
        self.out_dim = layer.weight.data.shape[0]
        self.in_dim = layer.weight.data.shape[1]
        # Running mean of squared activations, one entry per input feature.
        self.scaler_row = torch.zeros(self.in_dim, device=self.dev)
        self.nsamples = 0
        self.layer_id = layer_id
        self.layer_name = layer_name

    def add_batch(self, inp, out=None):
        """Fold a batch of input activations into the running statistics.

        Args:
            inp: tensor whose last dimension is the layer's input dimension.
                All leading dimensions (batch, seq_len, ...) are flattened,
                so 2-D ``[batch, hidden]`` and 3-D ``[batch, seq, hidden]``
                inputs are both accepted.
            out: unused; kept so the method can serve as a forward hook.
        """
        # Flatten every leading dimension: each row becomes one sample.
        # (A bare 1-D tensor is treated as a single sample.)
        inp = inp.reshape(-1, inp.shape[-1])
        # Silently skip tensors whose feature size does not match this layer.
        if inp.size(-1) != self.in_dim:
            return

        inp = inp.t().contiguous()  # shape: [in_dim, N]
        batch_size = inp.size(1)
        inp = inp.type(torch.float32)

        # Incremental-mean update: reweight the old mean, then fold in the
        # batch's per-feature sum of squares.
        total = self.nsamples + batch_size
        self.scaler_row *= self.nsamples / total
        # Direct sum of squares — avoids torch.norm's sqrt followed by
        # .pow(2), which wastes work and loses precision.
        self.scaler_row += inp.pow(2).sum(dim=1) / total
        self.nsamples += batch_size

    @property
    def stats(self):
        """Return ``scaler_row`` normalized by the sample count (safe at 0)."""
        return self.scaler_row / max(self.nsamples, 1)

    def free(self):
        """Release the accumulated statistics and return cached GPU memory."""
        self.scaler_row = None
        torch.cuda.empty_cache()
