import torch
from torch import nn
from transformer import Decoder, create_NaiveAttention, create_NaiveFFN
from tqdm import tqdm

# Pick the compute device. NOTE: is_available must be CALLED — the original
# referenced the bound method bare, which is always truthy, so 'cuda' was
# selected even on CPU-only machines (and model.to('cuda') would then fail).
device = 'cuda' if torch.cuda.is_available() else 'cpu'

    
class GenerativeTransformer(nn.Module):
    """Toy decoder-only transformer that learns to continue the sequence 1..9.

    Token ids 0-9 are embedded (0 is the padding id), run through a stack of
    4 decoder layers, and projected by a small MLP head to per-position
    logits.

    NOTE(review): the head emits 128 logits although the vocabulary has only
    10 ids — training still works because CrossEntropyLoss simply never
    selects the unused classes, but a 10-way output would be cleaner. Left
    unchanged here to preserve the output shape callers see.
    """

    def __init__(self):
        super().__init__()
        # Vocabulary of 10 token ids ([0..9]); id 0 is reserved as padding.
        self.embedding = nn.Embedding(
            num_embeddings=10,
            embedding_dim=128,
            padding_idx=0,
        )

        attn = create_NaiveAttention(
            input_dim=128,
            hidden_dim=128,
            output_dim=128,
            num_heads=4,
        )
        ffn = create_NaiveFFN(
            input_dim=128,
            hidden_dim=128,
            output_dim=128,
        )
        self.decoder = Decoder(
            num_decoders=4,
            attn=attn,
            ffn=ffn,
        )

        # Projection head producing per-position logits (see class docstring
        # about its 128-wide output).
        self.classifier = nn.Sequential(
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
        )

    def forward(self, x):
        """Map a (batch, seq) tensor of token ids to (batch, seq, 128) logits."""
        embedding = self.embedding(x)
        out = self.decoder(embedding)
        return self.classifier(out)

    def generate(self, num_tokens=7):
        """Autoregressively generate token ids after the start token 1.

        Args:
            num_tokens: number of tokens to generate. Defaults to 7 (the
                original hard-coded count); at most 8, since the fixed-size
                context buffer holds 9 positions.

        Returns:
            list[int]: the generated token ids, in order.
        """
        if not 0 <= num_tokens <= 8:
            raise ValueError('num_tokens must be in [0, 8]')
        # Context buffer: start token 1 followed by padding zeros.
        x = torch.tensor([[1] + [0] * 8], device=device)
        result = []
        with torch.no_grad():
            for i in range(num_tokens):
                # Logits at position i predict the token at position i + 1.
                output = self.forward(x)[0][i]
                # Fixed: argmax is computed once (the original recomputed it
                # for the append).
                value = torch.argmax(output).item()
                result.append(value)
                x[0][i + 1] = value  # feed the prediction back into the context
        return result


def main():
    """Train the toy transformer to map 1..8 -> 2..9, then sample from it."""
    model = GenerativeTransformer()
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    # Single fixed training pair: each position must predict its successor.
    # (Renamed from `input`, which shadowed the builtin.)
    inputs = torch.arange(1, 9, device=device).unsqueeze(0)                    # [[1..8]]
    labels = torch.arange(2, 10, device=device, dtype=torch.long).unsqueeze(0) # [[2..9]]
    loss_fn = nn.CrossEntropyLoss()

    # train
    for epoch in range(10000):
        output = model(inputs)
        # CrossEntropyLoss expects (batch, classes, seq): move logits to dim 1.
        output = torch.permute(output, [0, 2, 1])
        loss = loss_fn(output, labels)
        optimizer.zero_grad()
        loss.backward()
        # Fixed: step/zero_grad every iteration. The original called
        # backward() every iteration but step()/zero_grad() only every 100th,
        # silently accumulating 100 iterations of gradients per update; the
        # every-100 cadence is kept for logging only.
        optimizer.step()
        if epoch % 100 == 0:
            print('loss=', loss.item())

    result = model.generate()
    print(result)
    
if __name__ == '__main__':
    # Fixed: removed a dead debug tensor (torch.ones([3, 4, 4])) that was
    # allocated before main() and never used.
    main()