import os
import sys

# Make the sibling chapter directories (ch02, ch03) importable when this
# script is run directly from its own folder.
current_dir = os.path.dirname(os.path.abspath(__file__))
_parent_dir = os.path.dirname(current_dir)
ch02_path = os.path.join(_parent_dir, 'ch02')
ch03_path = os.path.join(_parent_dir, 'ch03')
sys.path.extend([ch02_path, ch03_path])


import torch
import torch.nn as nn
from tokenizer.gpt2 import GPT2Tokenizer
from transformer import GPTModel, LayerNorm, TransformerBlock, generate_text_simple

# Hyperparameters for the 124M-parameter GPT-2-style model.
GPT_CONFIG_124M = {
    "vocab_size": 50257,     # vocabulary size of the GPT-2 BPE tokenizer
    "context_length": 1024,  # maximum number of input tokens the model accepts
    "emb_dim": 768,          # embedding / hidden dimension
    "n_heads": 12,           # attention heads per transformer block
    "n_layers": 12,          # number of transformer blocks
    "drop_rate": 0.1,        # dropout probability
    "qkv_bias": False        # whether Q/K/V projections use a bias term
}

def main():
    """Run every demo/test function in sequence."""
    demos = (
        test_layer_norm,
        test_shortcut,
        test_transformer_block,
        test_gpt_model,
        test_generate_text_simple,
    )
    for demo in demos:
        demo()

def test_gpt_model():
    """Run a forward pass of the 124M GPT model on a two-prompt batch and
    print the parameter counts of its major sub-modules.

    Fix vs. original: the seed is set BEFORE model construction (seeding
    after init had no effect on the random weights), and the forward pass
    runs in eval mode under no_grad since no training happens here.
    """
    tokenizer = GPT2Tokenizer()

    batch = []
    txt1 = "Every effort moves you"
    txt2 = "Every day holds a"
    batch.append(tokenizer.encode(txt1))
    batch.append(tokenizer.encode(txt2))
    # Both prompts tokenize to the same length, so the rows stack cleanly.
    batch = torch.tensor(batch)

    # Seed before building the model so the weight initialization is
    # reproducible across runs.
    torch.manual_seed(123)
    model = GPTModel(GPT_CONFIG_124M)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")
    model.to(device)
    batch = batch.to(device)

    # Inference only: disable dropout and skip autograd bookkeeping.
    model.eval()
    with torch.no_grad():
        logits = model(batch)
    print(f"output shape: {logits.shape}")
    total_params = sum(p.numel() for p in model.parameters())
    print(f"Total parameters: {total_params:,}")

    params_in_emb = sum(p.numel() for p in model.tok_emb.parameters())
    print(f"Params in tok_emb: {params_in_emb:,}")

    params_in_attn = sum(p.numel() for block in model.trf_blocks for p in block.attn.parameters())
    print(f"Params in attn: {params_in_attn:,}")
    params_in_ff = sum(p.numel() for block in model.trf_blocks for p in block.ff.parameters())
    print(f"Params in ff: {params_in_ff:,}")

    params_in_out_head = sum(p.numel() for p in model.out_head.parameters())
    print(f"Params in out_head: {params_in_out_head:,}")


def test_layer_norm():
    """Show that LayerNorm output has ~zero mean and ~unit std.

    Fix vs. original: the mean/std printed are those of the *normalized*
    output — the original printed the raw input's statistics, which does
    not demonstrate the normalization at all. The forward pass is also
    computed once instead of twice.
    """
    layer_norm = LayerNorm(10)
    x = torch.randn(10)
    out = layer_norm(x)
    print(f"x: {x}")
    print(f"layer_norm(x): {out}")
    print(f"mean: {out.mean()}")
    print(f"std: {out.std()}")

def test_shortcut():
    """Demonstrate vanishing gradients in a deep MLP without shortcuts.

    Builds a small stack of Linear+ReLU layers, runs one backward pass,
    and prints the mean absolute gradient of each weight matrix; with
    ``use_shortcut=False`` the gradients shrink toward the early layers.

    Fix vs. original: the MSE target is shape (1,) to match the model
    output — the original (1, 1) target triggered silent broadcasting in
    MSELoss and a UserWarning.
    """
    class DemoDeepNN(nn.Module):
        # Stack of Linear+activation pairs with optional residual adds.
        def __init__(self, layer_sizes, use_shortcut, act_fn):
            super().__init__()
            self.use_shortcut = use_shortcut
            self.act_fn = act_fn
            layers = []
            for i in range(len(layer_sizes) - 1):
                layers.append(nn.Linear(layer_sizes[i], layer_sizes[i+1]))
                layers.append(self.act_fn)
            self.layers = nn.ModuleList(layers)

        def forward(self, x):
            for layer in self.layers:
                out = layer(x)
                # Residual add only where input/output shapes line up.
                if self.use_shortcut and out.shape == x.shape:
                    x = x + out
                else:
                    x = out
            return x

    layer_sizes = [3, 3, 3, 3, 1]
    use_shortcut = False
    act_fn = nn.ReLU()

    model = DemoDeepNN(layer_sizes, use_shortcut, act_fn)
    x = torch.tensor([1.0, 2.0, 3.0])
    print(f"x: {x}")
    print(f"model(x): {model(x)}")

    # Shape (1,) matches model output; avoids MSELoss broadcast warning.
    target = torch.tensor([0.0])
    loss_fn = nn.MSELoss()
    loss = loss_fn(model(x), target)
    print(f"loss: {loss}")

    loss.backward()
    for name, param in model.named_parameters():
        if "weight" in name:
            print(f"{name} has gradient mean: {param.grad.abs().mean()}")

def test_transformer_block():
    """Pass a dummy batch through one TransformerBlock and show that the
    output tensor keeps the input's (batch, seq_len, emb_dim) shape."""
    cfg = GPT_CONFIG_124M
    trf_block = TransformerBlock(cfg)
    inputs = torch.randn(2, 4, 768)
    outputs = trf_block(inputs)
    print(f"x shape: {inputs.shape}")
    print(f"Output shape: {outputs.shape}")

def test_generate_text_simple():
    """Greedy-generate text from an untrained GPT model as a smoke test.

    Fix vs. original: the model is switched to eval mode before generation
    — with drop_rate=0.1 an un-eval()'d model keeps dropout active during
    inference, producing noisy, non-reproducible logits.
    """
    config = GPT_CONFIG_124M
    tokenizer = GPT2Tokenizer()
    model = GPTModel(GPT_CONFIG_124M)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    # Generation is inference: disable dropout.
    model.eval()

    txt = "Every effort moves you"
    idx = tokenizer.encode(txt)
    # Add a batch dimension: shape (1, num_tokens).
    idx = torch.tensor(idx).unsqueeze(0).to(device)
    print(f"idx shape: {idx.shape}")
    # NOTE(review): context_length is passed as BOTH max_new_tokens and
    # context_size — generating 1024 tokens from an untrained model is very
    # slow on CPU; confirm this is intentional.
    idx = generate_text_simple(model, idx, config["context_length"], config["context_length"])
    print(f"idx shape: {idx.shape}")
    print(tokenizer.decode(idx[0].tolist()))
    
    

# Run all demos only when executed as a script (not on import).
if __name__ == "__main__":
    main()
