import torch 
import tqdm
from dataloader import make_dataloader, get_data
from model import RNN 
from configs import BATCH_SIZE, DATA_PATH, EMB_DIM, IN_HIDEN1, IN_HIDEN2, IN_LSTM, LSTM_LAYER, OUT_LSTM, OUT_HIDEN1, OUT_HIDEN2, DEVICE, LEARNING_RATE, EPOCHES

def train():
    """Train the RNN next-token language model and checkpoint every epoch.

    Loads the vocabulary and dataloader from DATA_PATH, then runs
    teacher-forced next-token prediction for EPOCHES epochs: the model input
    is each batch with the last token dropped, the target is the batch
    shifted left by one. A checkpoint is saved to ckpt/RNN_<epoch>.pth after
    each epoch, and the last batch's loss is printed.
    """
    import os  # local import: keeps the file's top-level import block untouched

    # ix2word is not needed for training itself, only the vocab size.
    _, word2ix, _ix2word = get_data(data_path=DATA_PATH)
    dataloader = make_dataloader(data_path=DATA_PATH, batch_size=BATCH_SIZE)
    model = RNN(
        vocab_size=len(word2ix),
        embedding_dim=EMB_DIM,
        input_hidden1=IN_HIDEN1,
        input_hidden2=IN_HIDEN2,
        lstm_input=IN_LSTM,
        lstm_layers=LSTM_LAYER,
        lstm_output=OUT_LSTM,
        output_hidden1=OUT_HIDEN1,
        output_hidden2=OUT_HIDEN2,
    ).to(DEVICE)
    criterion = torch.nn.CrossEntropyLoss().to(DEVICE)
    optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)

    os.makedirs('ckpt', exist_ok=True)  # torch.save does not create the directory
    model.train()  # make dropout/batch-norm (if any) behave in training mode
    for epoch in range(EPOCHES):
        for data in tqdm.tqdm(dataloader):
            # Assumes each batch is a (batch, seq_len) tensor of token ids
            # — TODO confirm against make_dataloader.
            data = data.long().contiguous().to(DEVICE)
            optimizer.zero_grad()
            # Teacher forcing: predict token t+1 from tokens 0..t.
            inputs = data[:, :-1]
            target = data[:, 1:].contiguous()
            # Model is expected to return (logits, hidden); logits flattened
            # to (batch * (seq_len - 1), vocab) for CrossEntropyLoss.
            output, _ = model(inputs)
            loss = criterion(output, target.view(-1))
            loss.backward()
            optimizer.step()
        torch.save(model.state_dict(), 'ckpt/%s_%s.pth' % ('RNN', epoch))
        # .item() extracts the Python float so {:.4f} formatting works.
        print('Epoch[{}/{}], Loss: {:.4f}'.format(epoch, EPOCHES, loss.item()))

        
if __name__ == '__main__':
    # Guard so importing this module (e.g. for evaluation) does not
    # immediately start a training run.
    train()