import torch
import cs336_basics
from cs336_basics.model import BasicsTransformerLM
import cs336_basics.model
from cs336_basics.nn_utils import cross_entropy, softmax
from cs336_basics.optimizer import AdamW
import timeit
import numpy as np
import torch.cuda.nvtx as nvtx
from jaxtyping import Float, Bool
from torch import Tensor
from einops import einsum
import math

@nvtx.range("scale dot product attention")
def annotated_scaled_dot_product_attention(
    Q: Float[Tensor, " ... queries d_k"],
    K: Float[Tensor, " ... keys    d_k"],
    V: Float[Tensor, " ... keys    d_v"],
    mask: Bool[Tensor, " ... queries keys"] | None = None,
) -> Float[Tensor, " ... queries d_v"]:
    """Scaled dot-product attention (Eq. 1 of the Transformer paper).

    Identical to the library implementation, but each phase (score matmul,
    softmax, value matmul) is wrapped in its own NVTX range so the phases
    show up separately in a profiler trace.

    Args:
        Q: Query tensor with any number of leading (batch/head) dimensions.
        K: Key tensor, sharing leading dimensions with Q.
        V: Value tensor, sharing leading dimensions with Q and K.
        mask: Optional boolean tensor of shape (..., queries, keys).
            Positions where the mask is False receive a score of -inf and
            therefore contribute nothing to the softmaxed weights.

    Returns:
        Tensor of shape (..., queries, d_v): attention-weighted values.
    """
    scale = math.sqrt(K.shape[-1])
    with nvtx.range("computing attention scores"):
        scores = einsum(Q, K, "... query d_k, ... key d_k -> ... query key") / scale
        if mask is not None:
            # Fill disallowed positions with -inf so softmax assigns them 0.
            scores = scores.masked_fill(~mask, float("-inf"))
    with nvtx.range("computing softmax"):
        # Normalize over the key axis.
        weights = softmax(scores, dim=-1)
    with nvtx.range("final matmul"):
        out = einsum(weights, V, "... query key, ... key d_v ->  ... query d_v")
    return out

cs336_basics.model.scaled_dot_product_attention = annotated_scaled_dot_product_attention

def small_model():
    """Return the core Transformer hyperparameters for the "small" config."""
    return dict(d_model=768, d_ff=3072, num_layers=12, num_heads=12)


def medium_model():
    """Return the core Transformer hyperparameters for the "medium" config."""
    return dict(d_model=1024, d_ff=4096, num_layers=24, num_heads=16)


def large_model():
    """Return the core Transformer hyperparameters for the "large" config."""
    return dict(d_model=1280, d_ff=5120, num_layers=36, num_heads=20)


def xl_model():
    """Return the core Transformer hyperparameters for the "xl" config."""
    return dict(d_model=1600, d_ff=6400, num_layers=48, num_heads=25)

def model_2_7():
    """Return the core Transformer hyperparameters for the "2.7" config."""
    return dict(d_model=2560, d_ff=10240, num_layers=32, num_heads=32)

def benchmark(
    core_param: dict,
    *,
    device: str = "cuda:0",
    vocab_size: int = 1000,
    batch_size: int = 1,
    context_length: int = 256,
    rope_theta: float = 10000.0,
    warmup_steps: int = 5,
    num_steps: int = 10,
) -> dict:
    """Time forward and backward+optimizer steps of a BasicsTransformerLM.

    Previously all sizes were hard-coded; they are now keyword-only
    parameters whose defaults reproduce the old behavior exactly.

    Args:
        core_param: Architecture hyperparameters (d_model, d_ff, num_layers,
            num_heads), e.g. from small_model()/medium_model()/....
        device: Device string for the model and inputs. A CUDA device is
            required — torch.cuda.synchronize() is used to fence timing.
        vocab_size: Vocabulary size; also the range of the random token ids.
        batch_size: Batch dimension of the random input.
        context_length: Sequence length of the random input.
        rope_theta: RoPE base frequency passed to the model.
        warmup_steps: Untimed forward+backward iterations run first so CUDA
            context creation / kernel selection does not pollute the timings.
        num_steps: Number of timed iterations.

    Returns:
        Dict with "forward" and "backward" keys, each a string reporting the
        mean and std (seconds) of the per-step wall-clock time. Note that the
        "backward" timing also includes the loss computation, zero_grad, and
        the optimizer step, not the backward pass alone.
    """
    model_param = {"vocab_size": vocab_size, "context_length": context_length, "rope_theta": rope_theta}
    model = BasicsTransformerLM(**model_param, **core_param).to(device)
    optimizer = AdamW(model.parameters())

    # Random token ids serve as both input and target — fine for pure timing.
    in_features = torch.randint(0, vocab_size, (batch_size, context_length), dtype=torch.long, device=device)

    for _ in range(warmup_steps):
        out = model(in_features)
        loss = cross_entropy(out, in_features)
        loss.backward()
    # Drop gradients accumulated during warm-up so they don't linger.
    model.zero_grad(set_to_none=True)

    torch.cuda.synchronize()

    forward_times = []
    backward_times = []
    for _ in range(num_steps):
        t0 = timeit.default_timer()
        out = model(in_features)
        torch.cuda.synchronize()  # wait for the async forward kernels to finish
        forward_times.append(timeit.default_timer() - t0)

        t0 = timeit.default_timer()
        loss = cross_entropy(out, in_features)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        torch.cuda.synchronize()
        backward_times.append(timeit.default_timer() - t0)

    np_forward = np.array(forward_times)
    np_backward = np.array(backward_times)
    return {
        "forward": f"Mean: {np_forward.mean()}, Std: {np_forward.std()}",
        "backward": f"Mean: {np_backward.mean()}, Std: {np_backward.std()}",
    }


if __name__ == "__main__":
    # Benchmark the medium configuration by default and report the timings.
    results = benchmark(medium_model())
    print(results)
