import torch
import torch.nn as nn
from einops import einsum, rearrange, repeat, reduce
import numpy as np
from jaxtyping import Float, Int
from torch import Tensor
import os


class Linear(nn.Module):
    """Bias-free linear layer: y = x @ W^T with W of shape (out_features, in_features)."""

    def __init__(self, in_features: int, out_features: int, device=None, dtype=None):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.empty([out_features, in_features], dtype=dtype, device=device), requires_grad=True)
        self.reset_parameter()

    def reset_parameter(self):
        # Truncated normal with Xavier-style std, clipped to +/- 3 standard deviations.
        scale = np.sqrt(2 / (self.in_features + self.out_features))
        nn.init.trunc_normal_(self.weight, 0, scale, -3 * scale, 3 * scale)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Same contraction as einsum("... d_in, d_out d_in -> ... d_out").
        return x @ self.weight.transpose(0, 1)


class Embedding(nn.Module):
    """Token embedding table: maps integer ids to rows of a learned (num_embeddings, embedding_dim) matrix."""

    def __init__(self, num_embeddings: int, embedding_dim: int, device=None, dtype=None):
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        self.device = device
        self.weight = nn.Parameter(torch.empty([num_embeddings, embedding_dim], device=device, dtype=dtype))
        # BUG FIX: the original allocated torch.ones and never called reset_parameter,
        # so every embedding row was identically 1.0. Initialize properly here
        # (mirrors how Linear calls its own reset in __init__).
        self.reset_parameter()

    def reset_parameter(self):
        # Truncated standard normal, clipped to +/- 3.
        nn.init.trunc_normal_(self.weight, 0, 1, -3, 3)

    def forward(self, token_ids: torch.Tensor) -> torch.Tensor:
        # Direct row lookup replaces the original dense one-hot matmul: identical
        # result, O(1) extra memory instead of O(batch * seq * vocab), and it
        # generalizes to token_ids of any shape (the one-hot path required 2-D input).
        return self.weight[token_ids]

class RMSNorm(nn.Module):
    """Root-mean-square layer norm with a learned per-channel gain.

    Normalizes over the last dimension: x / sqrt(mean(x^2) + eps) * weight.
    The reduction is done in float32 and cast back to the input dtype.
    """

    def __init__(self, d_model: int, eps: float = 1e-5, device=None, dtype=None):
        super().__init__()
        self.d_model = d_model
        self.eps = eps
        self.device = device
        self.dtype = dtype
        # BUG FIX: the gain was initialized with torch.randn, making the layer a
        # random per-channel scale at init. Standard RMSNorm initializes the gain to 1.
        self.weight = nn.Parameter(torch.ones(d_model, device=self.device, dtype=self.dtype))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        in_dtype = x.dtype
        # Compute in float32 for numerical stability regardless of input dtype.
        variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
        x = x * torch.rsqrt(variance + self.eps)
        return (x * self.weight).to(in_dtype)
    
def silu(x: torch.Tensor):
    """SiLU (a.k.a. Swish) activation: x * sigmoid(x)."""
    return x * torch.sigmoid(x)

class SwiGLU(nn.Module):
    """SwiGLU feed-forward block: w2(silu(w1(x)) * w3(x))."""

    def __init__(self, d_model: int, d_ff: int):
        super().__init__()
        self.d_model = d_model
        self.d_ff = d_ff

        self.w1 = Linear(d_model, d_ff)   # gate projection
        self.w2 = Linear(d_ff, d_model)   # output projection
        self.w3 = Linear(d_model, d_ff)   # value projection
        self.reset_parameter()

    def reset_parameter(self):
        # Re-initialize all three projections with Xavier normal
        # (overrides Linear's own truncated-normal init).
        for projection in (self.w1, self.w2, self.w3):
            nn.init.xavier_normal_(projection.weight)

    def forward(self, x: torch.Tensor):
        gate = silu(self.w1(x))
        value = self.w3(x)
        return self.w2(gate * value)
    
class RotaryPositionalEmbedding(nn.Module):
    """Precomputed RoPE cos/sin tables (interleaved-pair layout) up to max_seq_len.

    forward() only slices the cached tables and casts them to the query's dtype;
    it does not rotate anything itself (see apply_rotary_pos_emb).
    """

    def __init__(self, theta: float, max_seq_len: int, d_k: int, device=None):
        super().__init__()
        self.theta = theta
        self.d_k = d_k
        self.max_seq_len = max_seq_len
        self.device = device

        # inv_freq[i] = theta^(-2i / d_k) for i in [0, d_k // 2)
        inv_freq = 1.0 / (theta ** (torch.arange(0, d_k, 2).float() / d_k))
        self.register_buffer("inv_freq", inv_freq, False)

        positions = torch.arange(self.max_seq_len)
        angles = torch.outer(positions, self.inv_freq)            # [seq_len, d_k//2]
        # Duplicate each angle so consecutive channel pairs share one rotation angle.
        angles = torch.repeat_interleave(angles, 2, dim=-1)       # [seq_len, d_k]
        self.register_buffer("cos_cached", angles.cos()[None, None, :, :], persistent=False)
        self.register_buffer("sin_cached", angles.sin()[None, None, :, :], persistent=False)

    def forward(self, x: torch.Tensor, seq_length: int = None):
        # x is used only for its dtype.
        if seq_length is None:
            seq_length = self.max_seq_len
        cos = self.cos_cached[:, :, :seq_length, ...].to(dtype=x.dtype)
        sin = self.sin_cached[:, :, :seq_length, ...].to(dtype=x.dtype)
        return cos, sin

def rotate_half(x):
    """Rotate consecutive element pairs: (a, b) -> (-b, a) along the last dim."""
    # Group the last dimension into pairs: [..., d_k//2, 2].
    paired = x.reshape(*x.shape[:-1], -1, 2)
    first, second = paired.unbind(dim=-1)
    # Swap each pair, negating the second element, then flatten back.
    return torch.stack((-second, first), dim=-1).flatten(-2)

def apply_rotary_pos_emb(q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, position_ids: torch.Tensor=None):
    """Rotate q and k by their positional angles: x' = x*cos + rotate_half(x)*sin."""
    # The leading two dims of the cached tables are singletons; drop them, then
    # gather per-position rows and re-insert a broadcast dim.
    cos = cos.squeeze(1).squeeze(0)[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    sin = sin.squeeze(1).squeeze(0)[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    rotated_q = q * cos + rotate_half(q) * sin
    rotated_k = k * cos + rotate_half(k) * sin
    return rotated_q, rotated_k

class SoftMax(nn.Module):
    """Numerically stable softmax over an arbitrary dimension."""

    def __init__(self):
        super().__init__()

    def forward(self, x: torch.Tensor, dim: int):
        # Subtract the per-slice max for stability. Note tensor.max(dim=...) returns
        # a (values, indices) pair, hence the [0].
        x = x - x.max(dim=dim, keepdim=True)[0]
        # PERF FIX: the original evaluated x.exp() twice; compute it once and reuse.
        exps = x.exp()
        return exps / exps.sum(dim=dim, keepdim=True)

class ScaleDotAttention(nn.Module):
    """Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V."""

    def __init__(self, ):
        super().__init__()
        self.softmax = SoftMax()

    def forward(self, Q: torch.Tensor, K: torch.Tensor, V: torch.Tensor, mask: torch.Tensor=None):
        d_k = Q.shape[-1]
        # Q K^T, pre-scaled by 1/sqrt(d_k); equivalent to scaling after the matmul.
        scores = (Q @ K.transpose(-2, -1)) / np.sqrt(d_k)
        if mask is not None:
            # mask is True where attention is allowed; -inf zeroes masked weights.
            scores = scores.masked_fill(~mask, -np.inf)
        weights = self.softmax(scores, dim=-1)
        return weights @ V
    
class MultiHeadSelfAttention(nn.Module):
    """Multi-head attention; heads are carried as the LEADING tensor dimension."""

    def __init__(self, d_model: int, num_heads: int):
        super().__init__()
        self.d_model = d_model
        self.num_heads = num_heads
        # Q and K are dotted together, so they must share a head dimension.
        self.d_k = self.d_v = self.d_o = d_model // num_heads

        self.q_proj = Linear(d_model, self.num_heads * self.d_k)
        self.k_proj = Linear(d_model, self.num_heads * self.d_k)
        self.v_proj = Linear(d_model, self.num_heads * self.d_v)
        self.output_proj = Linear(self.num_heads * self.d_v, d_model)

        self.attn = ScaleDotAttention()

    def _split_heads(self, x: torch.Tensor) -> torch.Tensor:
        # [b, t, h*d] -> [h, b, t, d]
        b, t, _ = x.shape
        return x.view(b, t, self.num_heads, -1).permute(2, 0, 1, 3)

    def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, mask: torch.Tensor=None):
        q = self._split_heads(self.q_proj(q))
        k = self._split_heads(self.k_proj(k))
        v = self._split_heads(self.v_proj(v))
        out = self.attn(q, k, v, mask)
        # [h, b, t, d] -> [b, t, h*d]
        h, b, t, d = out.shape
        out = out.permute(1, 2, 0, 3).reshape(b, t, h * d)
        return self.output_proj(out)

class MultiHeadSelfAttentionWithRope(nn.Module):
    """Multi-head self-attention with rotary position embeddings applied to Q and K."""

    def __init__(self, d_model:int, num_heads: int, max_seq_len: int, theta: float):
        super().__init__()
        self.d_model = d_model
        self.num_heads = num_heads
        # Q and K share the head dimension since they are dotted together.
        self.d_k = self.d_v = d_model // num_heads
        self.rope = RotaryPositionalEmbedding(theta, max_seq_len, self.d_k)

        self.q_proj = Linear(d_model, self.num_heads * self.d_k)
        self.k_proj = Linear(d_model, self.num_heads * self.d_k)
        self.v_proj = Linear(d_model, self.num_heads * self.d_v)
        self.output_proj = Linear(self.num_heads * self.d_v, d_model)

        self.attn = ScaleDotAttention()

    def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, mask: torch.Tensor=None, postion_ids: torch.LongTensor=None):
        # NOTE(review): the 'postion_ids' spelling is kept as-is for keyword-caller compatibility.
        def split_heads(x):
            # [b, t, h*d] -> [h, b, t, d]
            b, t, _ = x.shape
            return x.view(b, t, self.num_heads, -1).permute(2, 0, 1, 3)

        q = split_heads(self.q_proj(q))
        k = split_heads(self.k_proj(k))
        v = split_heads(self.v_proj(v))
        # Tables cover the full max_seq_len; apply_rotary_pos_emb selects positions.
        cos, sin = self.rope(q)
        q, k = apply_rotary_pos_emb(q, k, cos, sin, postion_ids)
        out = self.attn(q, k, v, mask)
        # [h, b, t, d] -> [b, t, h*d]
        h, b, t, d = out.shape
        out = out.permute(1, 2, 0, 3).reshape(b, t, h * d)
        return self.output_proj(out)

class TransformerBlock(nn.Module):
    """Pre-norm transformer block: RoPE attention sublayer, then SwiGLU FFN, each with a residual."""

    def __init__(self, d_model: int, num_heads: int, d_ff: int, max_seq_len: int, theta: float):
        super().__init__()
        self.d_model = d_model
        self.num_heads = num_heads
        self.d_ff = d_ff
        self.max_seq_len = max_seq_len
        self.theta = theta

        self.attn = MultiHeadSelfAttentionWithRope(d_model, num_heads, max_seq_len, theta)
        self.ln1 = RMSNorm(d_model)
        self.ffn = SwiGLU(d_model, d_ff)
        self.ln2 = RMSNorm(d_model)

    def forward(self, x: torch.Tensor, mask: torch.BoolTensor, position_ids: torch.LongTensor):
        # Attention sublayer (pre-norm + residual).
        normed = self.ln1(x)
        x = x + self.attn(normed, normed, normed, mask, position_ids)
        # Feed-forward sublayer (pre-norm + residual).
        return x + self.ffn(self.ln2(x))
    
class TransformerLM(nn.Module):
    """Decoder-only transformer LM: token embedding -> N RoPE attention blocks -> RMSNorm -> vocab head."""

    def __init__(self, vocab_size: int, context_length: int, d_model: int, num_layers: int, num_heads: int, d_ff: int, rope_theta: float):
        super().__init__()
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.d_model = d_model
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.d_ff = d_ff
        self.rope_theta = rope_theta

        self.token_embeddings = Embedding(vocab_size, d_model)
        self.layers = nn.ModuleList([
            TransformerBlock(d_model, num_heads, d_ff, context_length, rope_theta) for _ in range(num_layers)
        ])
        self.ln_final = RMSNorm(d_model)
        self.lm_head = Linear(d_model, vocab_size)

    def forward(self, x: torch.Tensor, mask: torch.BoolTensor, position_ids: torch.LongTensor):
        """Return logits of shape [batch, seq_len, vocab_size]."""
        x = self.token_embeddings(x)
        for block in self.layers:
            x = block(x, mask, position_ids)
        x = self.ln_final(x)
        x = self.lm_head(x)
        return x

    def _top_p_filter(self, prob: torch.Tensor, top_p: float) -> torch.Tensor:
        """Keep the smallest prefix of tokens (by descending prob) whose mass reaches top_p; renormalize.

        Vectorized replacement for the original Python sort-and-accumulate loop;
        the token that first pushes cumulative mass past top_p is included, matching
        the original's inclusive break condition.
        """
        sorted_prob, sorted_idx = torch.sort(prob, descending=True)
        cumulative = torch.cumsum(sorted_prob, dim=-1)
        keep_sorted = torch.ones_like(cumulative, dtype=torch.bool)
        # A sorted token is kept iff the mass BEFORE it is still below top_p.
        keep_sorted[1:] = cumulative[:-1] < top_p
        keep = torch.zeros_like(prob, dtype=torch.bool)
        keep[sorted_idx[keep_sorted]] = True
        prob = prob * keep
        return prob / prob.sum()

    def generate(self, x: torch.Tensor, stop_ids: int, max_length: int, temperature: float, top_p: float, do_sample: bool = False):
        """Autoregressive top-p decoding for a single sequence (batch size 1).

        Args:
            x: [1, prompt_len] prompt token ids.
            stop_ids: single stop-token id; generation halts when it is produced.
            max_length: hard cap on total sequence length (prompt included).
            temperature: logit divisor applied before the softmax.
            top_p: nucleus-sampling probability-mass threshold.
            do_sample: sample from the filtered distribution if True, else argmax.

        Returns:
            numpy array of token ids, prompt included.
        """
        softmax = SoftMax()
        self.eval()
        with torch.no_grad():
            # ROBUSTNESS FIX: `<` instead of the original post-append `== max_length`
            # check, which could loop forever when the prompt was already >= max_length.
            while x.shape[1] < max_length:
                # NOTE(review): an all-True mask means no causal masking. That is
                # harmless for decoding (only the last position's logits are read),
                # but differs from training-time causal attention — confirm intent.
                mask = torch.ones_like(x, dtype=torch.bool, device=x.device)
                # BUG FIX: position ids must span the SEQUENCE dimension x.shape[1].
                # The original used x.shape[0] (batch size 1), so every token was
                # rotated as if it sat at position 0, corrupting RoPE during decoding.
                position_ids = torch.arange(x.shape[1], dtype=torch.long, device=x.device).unsqueeze(0)
                out = self.forward(x, mask, position_ids)
                prob = softmax.forward(out / temperature, -1)[0, -1]
                prob = self._top_p_filter(prob, top_p)

                if do_sample:
                    token_id = torch.multinomial(prob, num_samples=1)
                else:
                    token_id = torch.argmax(prob, dim=-1, keepdim=True)

                if token_id.item() == stop_ids:
                    break

                x = torch.cat([x, token_id.unsqueeze(0)], dim=1)

        return x.cpu().numpy()
    

def compute_cross_entropy(logits: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    """Mean token-level cross-entropy loss.

    Args:
        logits: [..., seq_len, vocab_size] unnormalized scores.
        target: [..., seq_len] integer class indices.

    Returns:
        Scalar tensor: mean negative log-likelihood over all positions.
    """
    # Shift by the per-position max for numerical stability (invariant under softmax).
    logits = logits - logits.max(-1, keepdim=True)[0]
    picked = torch.gather(logits, -1, target.unsqueeze(-1)).squeeze(-1)
    # STABILITY FIX: logsumexp instead of log(sum(exp(...))) avoids underflow to
    # log(0) when all shifted logits are very negative.
    log_prob = picked - torch.logsumexp(logits, dim=-1)
    return -log_prob.mean()


class SGD(torch.optim.Optimizer):
    """Plain SGD with a per-parameter 1/sqrt(t+1) step-size decay."""

    def __init__(self, params, lr=1e-3):
        # BUG FIX: the original `assert lr >= 0, print(...)` printed as a side effect
        # and attached None as the assertion message (and vanishes under `python -O`).
        # Raise ValueError instead, matching torch.optim convention.
        if lr < 0:
            raise ValueError(f"lr:{lr} invalid")
        defaults = {"lr": lr}
        super().__init__(params, defaults)

    def step(self, closure=None):
        """Perform one update step; returns closure() result if a closure is given."""
        loss = None if closure is None else closure()
        for group in self.param_groups:
            lr = group['lr']
            for p in group['params']:
                if p.grad is None:
                    continue
                state = self.state[p]
                t = state.get("t", 0)  # per-parameter step counter
                grad = p.grad.data
                # Effective step size decays as lr / sqrt(t + 1).
                p.data -= lr / np.sqrt(t + 1) * grad
                state["t"] = t + 1
        return loss
    
class AdamW(torch.optim.Optimizer):
    """Adam with decoupled weight decay (AdamW, Loshchilov & Hutter)."""

    def __init__(self, params, lr, weight_decay: float, betas: tuple[float, float], eps=1e-8):
        defaults = {
            "lr": lr,
            "weight_decay": weight_decay,
            "beta1": betas[0],
            "beta2": betas[1],
            "eps": eps
        }
        super().__init__(params, defaults)

    def step(self, closure=None):
        """Perform one AdamW update; returns closure() result if a closure is given."""
        loss = None if closure is None else closure()
        for group in self.param_groups:
            lr = group['lr']
            weight_decay = group['weight_decay']
            beta1 = group['beta1']
            beta2 = group['beta2']
            eps = group['eps']
            for p in group['params']:
                if p.grad is None:
                    continue
                state = self.state[p]
                grad = p.grad.data
                # Exponential moving averages of the gradient and its square.
                m = beta1 * state.get("m", torch.zeros_like(p)) + (1 - beta1) * grad
                v = beta2 * state.get("v", torch.zeros_like(p)) + (1 - beta2) * grad.pow(2)
                t = state.get("t", 1)  # 1-based step counter for bias correction
                # BUG FIX: np.pow does not exist before NumPy 2.0 (AttributeError at
                # runtime); use the ** operator, which is exact for Python floats.
                step_size = lr * ((1 - beta2 ** t) ** 0.5) / (1 - beta1 ** t)
                p.data -= step_size * m / (torch.sqrt(v) + eps)
                # Decoupled weight decay, applied directly to the parameter.
                p.data -= lr * weight_decay * p.data

                state['m'] = m
                state['v'] = v
                state['t'] = t + 1
        return loss

def lr_schedule(t: int, alpha_max: float, alpha_min: float, t_w: int, t_c: int) -> float:
    """Linear-warmup + cosine-decay learning-rate schedule.

    Args:
        t: current step.
        alpha_max: peak learning rate (reached at t == t_w).
        alpha_min: floor learning rate (reached at t == t_c and held afterwards).
        t_w: number of warmup steps.
        t_c: final step of the cosine window.
    """
    if t < t_w:
        # Linear warmup from 0 up to alpha_max.
        return t / t_w * alpha_max
    if t > t_c:
        # Past the cosine window: hold the floor.
        return alpha_min
    # Cosine anneal from alpha_max down to alpha_min over [t_w, t_c].
    progress = (t - t_w) / (t_c - t_w)
    return alpha_min + 0.5 * (1 + np.cos(progress * np.pi)) * (alpha_max - alpha_min)

def grad_clip(params: list[nn.Parameter], max_l2_norm: float):
    """Scale all gradients in place so their GLOBAL L2 norm is at most max_l2_norm.

    Parameters without gradients are skipped; nothing happens when the total
    norm is already within the bound.
    """
    grads = [p.grad.data for p in params if p.grad is not None]
    total_norm = sum(g.norm(2).item() ** 2 for g in grads) ** 0.5
    if total_norm > max_l2_norm:
        # The small epsilon keeps the division safe for tiny norms.
        scale = max_l2_norm / (total_norm + 1e-6)
        for g in grads:
            g.mul_(scale)

def load_data(dataset: np.array, batch_size: int, context_length: int, device: str) -> tuple[torch.Tensor, torch.Tensor]:
    """Sample a random batch of (input, next-token) windows from a 1-D token array.

    Returns (source, target) long tensors of shape [batch_size, context_length],
    where target is source shifted one position to the right in the dataset.
    """
    import random
    max_start = len(dataset) - context_length - 1
    starts = [random.choice(range(max_start + 1)) for _ in range(batch_size)]
    source = np.array([dataset[s:s + context_length] for s in starts])
    target = np.array([dataset[s + 1:s + context_length + 1] for s in starts])
    return (
        torch.tensor(source, dtype=torch.long, device=device),
        torch.tensor(target, dtype=torch.long, device=device),
    )

def save_checkpoint(model: nn.Module, optimizer: torch.optim.Optimizer, iteration: int, out: str):
    """Serialize model/optimizer state plus the iteration counter to path `out`."""
    torch.save(
        {
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'iteration': iteration,
        },
        out,
    )

def load_checkpoint(src: str, model: nn.Module, optimizer: torch.optim.Optimizer) -> int:
    """Restore model and optimizer state from `src`; returns the saved iteration."""
    checkpoint = torch.load(src)
    model.load_state_dict(checkpoint['model'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    return checkpoint['iteration']



if __name__ == '__main__':
    # Ad-hoc smoke tests for the modules above, kept commented out for reference.
    # l = Linear(100, 20)
    # res = l.forward(torch.ones([128, 10, 100]))
    # print(res.shape)


    # emb = Embedding(1028, 200)
    # print(emb(torch.randint(0, 200, [128, 500], dtype=torch.long)).shape)

    # norm = RMSNorm(500, 1e-3)
    # print(norm(torch.randn([128, 50, 500])).shape)

    # swiglu = SwiGLU(1024)
    # print(swiglu(torch.randn([128, 200, 1024])).shape)

    # rope = RotaryPositionalEmbedding(10, 5, 6)

    # q = torch.randn(128, 4, 5, 6)
    # k = torch.randn(128, 4, 5, 6)
    # cos, sin = rope(q)

    # q_rotated, k_rotated = apply_rotary_pos_emb(q, k, cos, sin, torch.randint(0, 4, [128, 5]))

    # sclae_dot_attention = ScaleDotAttention()
    # sclae_dot_attention(torch.randn(128, 100, 64), torch.randn(128, 50, 64), torch.randn(128, 50, 64), torch.randint(0, 1, (128, 100, 50)) == 1 )

    # compute_cross_entropy(1000 * torch.randn((128, 100, 50)), torch.randint(0, 50, (128, 100)))

    # for lr in [1, 0.1, 0.01, 0.001]:
    #     weights = nn.Parameter(5 * torch.randn((10, 10)))
    #     opt = SGD([weights], lr)
    #     print('-----------------------------------------')
    #     for t in range(100):
    #         opt.zero_grad()
    #         loss = weights.pow(2).mean()
    #         print(loss.cpu().item())
    #         loss.backward()
    #         opt.step()
    # a = nn.Parameter(torch.randn((5, 5)))
    # a.sum().backward()
    # grad_clip(a, 0.2)

    # save_checkpoint(Linear(10, 10), SGD(Linear(10, 10).parameters()), 10, 'ckpt.pt')

    # End-to-end generation smoke test. Requires the project-local tokenizer plus
    # data/vocab.json and data/merges.txt, and a CUDA device.
    from cs336_basics.my_tokenizer import Tokenizer
    device = 'cuda:0'
    tk = Tokenizer.from_files(vocab_filepath='data/vocab.json', merges_filepath='data/merges.txt')
    # TransformerLM(vocab_size=500, context_length=100, d_model=100, num_layers=2,
    #               num_heads=2, d_ff=100, rope_theta=100) — weights are untrained.
    # NOTE(review): vocab_size=500 must match the tokenizer's actual vocabulary — confirm.
    lm = TransformerLM(500, 100, 100, 2, 2, 100, 100).to(device)
    in_str = "hello, I am your father"
    in_tokens = tk.encode(in_str)
    in_tensor = torch.tensor(np.array(in_tokens)).unsqueeze(0).to(device)
    # generate(stop_id=999, max_length=1000, temperature=0.5, top_p=0.8, do_sample=True)
    # NOTE(review): max_length (1000) exceeds context_length (100), but the RoPE
    # tables only cover 100 positions — confirm the intended generation cap.
    res = lm.generate(in_tensor, 999, 1000, 0.5, 0.8, True)
    sentence = tk.decode(res.tolist()[0])
    print(sentence)