import torch.nn as nn
import random
import torch
import numpy as np
import os
from torch.utils import data
from comp.DataloadernoConJP import DataloadernoConJP as Dataloader

# Configuration for the torch DataLoader wrapping the training set.
data_params: dict = dict(batch_size=300, shuffle=True, num_workers=6)

# Project-local dataset; empty-string paths and window size 20 are
# presumably defaults baked into DataloadernoConJP — TODO confirm.
training_set = Dataloader('', '', 20)
train_data_iterator = data.DataLoader(training_set, **data_params)


class Encoder(nn.Module):
    """Stacked-LSTM encoder over pre-embedded feature sequences.

    The usual nn.Embedding lookup is deliberately disabled — inputs are
    already dense vectors of size ``emb_dim`` — so ``input_dim`` is kept
    only for interface compatibility and is otherwise unused.
    """

    def __init__(self, input_dim, emb_dim, hid_dim, n_layers=2, dropout=0.8):
        super().__init__()

        self.hid_dim = hid_dim
        self.n_layers = n_layers

        # Dropout is applied directly to the dense input features.
        self.dropout = nn.Dropout(dropout)
        self.rnn = nn.LSTM(emb_dim, hid_dim, n_layers, dropout=dropout)

    def forward(self, src):
        """Encode a sequence.

        src: [src_len, batch, emb_dim] — already-embedded features.
        Returns (outputs, hidden, cell) where
        outputs: [src_len, batch, hid_dim] (top layer, every step),
        hidden/cell: [n_layers, batch, hid_dim] (final states per layer).
        """
        dropped = self.dropout(src)
        outputs, (hidden, cell) = self.rnn(dropped)
        return outputs, hidden, cell


class Decoder(nn.Module):
    """Stacked-LSTM decoder that consumes the encoder's output sequence.

    NOTE(review): the final linear projection ``fc_out`` is constructed
    but never applied in forward() — the raw LSTM output is returned
    directly. It is kept so the module's state_dict layout is unchanged.
    """

    def __init__(self, output_dim, emb_dim, hid_dim, n_layers=2, dropout=0.8):
        super().__init__()

        self.output_dim = output_dim
        self.hid_dim = hid_dim
        self.n_layers = n_layers

        self.rnn = nn.LSTM(emb_dim, hid_dim, n_layers, dropout=dropout)
        # Currently bypassed in forward(); see class docstring.
        self.fc_out = nn.Linear(hid_dim, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input):
        """Decode a sequence.

        input: [seq_len, batch, emb_dim] — here, the encoder outputs.
        Returns the raw LSTM output: [seq_len, batch, hid_dim].
        """
        dropped = self.dropout(input)
        output, _states = self.rnn(dropped)
        # fc_out intentionally not applied — raw features are the prediction.
        return output


class Seq2seq(nn.Module):
    """Encoder→decoder pipeline: 20-dim inputs → 30-dim hidden → 3-dim output.

    The decoder receives the encoder's full output sequence; the final
    hidden/cell states are discarded.
    """

    def __init__(self):
        super().__init__()
        self.en = Encoder(0, 20, 30, n_layers=2)
        self.de = Decoder(3, 30, 3)

    def forward(self, inp):
        enc_outputs, _hidden, _cell = self.en(inp)
        return self.de(enc_outputs)

# --- Training loop -------------------------------------------------------
model = Seq2seq()
optimizer = torch.optim.Adam(model.parameters())
# NOTE(review): "certifi" is presumably a typo for "criterion"; the name is
# kept because it is a module-level binding other code may reference.
certifi = nn.L1Loss()
model.train()

# Bug fix: np.save below raises FileNotFoundError if the output directory
# does not exist yet — create it up front.
os.makedirs('seq2seqout', exist_ok=True)

for epoch in range(500):

    # Loop variable renamed from `data`, which shadowed the
    # `torch.utils.data` module imported at the top of the file.
    for num_steps, batch in enumerate(train_data_iterator):
        lyrics_seq = batch[0]
        discrete_val_seq = batch[1]

        out_seq2seq = model(lyrics_seq)
        # L1 loss is symmetric, so passing (target, prediction) gives the
        # same value as nn.L1Loss's documented (input, target) order.
        loss = certifi(discrete_val_seq, out_seq2seq)
        # .item() prints a plain float rather than a tensor repr.
        print('Epoch : {} ,step {}, loss : {}'.format(epoch + 1, num_steps + 1, loss.item()))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Every 50 epochs, snapshot targets vs. (detached) predictions
        # from the first step, concatenated along the feature dim.
        if (epoch + 1) % 50 == 0 and num_steps == 0:
            np.save(os.path.join('seq2seqout', 'epoch{}_seq2seq_data'.format(epoch + 1)),
                    torch.cat((discrete_val_seq, out_seq2seq.detach()), dim=2))
        # Only train on the first 5 mini-batches per epoch.
        if num_steps + 1 == 5:
            break

# model.eval()



