import torch
import unicodedata
import string
import os
from collections import Counter
from torch.utils.data import Dataset, DataLoader
import numpy as np
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
from sklearn.metrics import f1_score

# Training hyperparameters / run configuration.
batch_size = 64
learning_rate = 0.01
epochs = 30
# Run on GPU when one is available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Fraction of the samples used for training; the remainder is validation.
train_rate = 0.8

# Character inventory we keep: ASCII letters plus a few punctuation marks.
all_letters = string.ascii_letters + ".,;'"

def unicode2accii(s):
    """Convert a unicode string to its plain-ASCII form.

    Decomposes accented characters (NFD), drops the combining marks
    (category 'Mn'), and discards anything not in ``all_letters``,
    e.g. 'Ślusàrski' -> 'Slusarski'.
    """
    decomposed = unicodedata.normalize('NFD', s)
    kept = [ch for ch in decomposed
            if unicodedata.category(ch) != 'Mn' and ch in all_letters]
    return ''.join(kept)

def preprocess():
    """Load the names corpus and build the vocabularies.

    Reads every file under ``data/data/names`` (one name per line; the file
    name, minus extension, is the language label), ASCII-folds each name,
    and builds character/label index mappings ordered by frequency.
    Index 0 of the character vocabulary is reserved for '<pad>'.

    Returns:
        (names, labels, idx2name, name2idx, idx2label, label2idx)
    """
    names = []
    labels = []

    for file in os.listdir('data/data/names'):
        # The language label comes from the file name, e.g. "Italian.txt".
        lang = os.path.splitext(file)[0]
        with open('data/data/names/' + file, 'r', encoding='utf-8') as f:
            for line in f:
                names.append(unicode2accii(line.strip("\n")))
                labels.append(lang)

    # Character vocabulary, most frequent first, with '<pad>' pinned to id 0
    # (matches the padding_value used by the collate function).
    char_counts = Counter("".join(names))
    idx2name = ['<pad>'] + [ch for ch, _ in char_counts.most_common()]
    name2idx = {ch: i for i, ch in enumerate(idx2name)}

    # Label vocabulary, most frequent first.
    idx2label = [lab for lab, _ in Counter(labels).most_common()]
    label2idx = {lab: i for i, lab in enumerate(idx2label)}

    return names, labels, idx2name, name2idx, idx2label, label2idx

def split_data(names, labels, train_rate=0.8):
    """Randomly shuffle the samples and split them into train/validation sets.

    Args:
        names: sequence of name strings.
        labels: sequence of labels, parallel to ``names``.
        train_rate: fraction of samples assigned to the training split
            (default 0.8, matching the module-level setting). Previously this
            was read from the module global; it is now a parameter so the
            split is configurable per call.

    Returns:
        (train_names, val_names, train_labels, val_labels) as numpy arrays.
    """
    num_train = int(len(names) * train_rate)

    # One permutation applied to both arrays keeps name/label pairs aligned.
    perm = np.random.permutation(len(names))
    names = np.array(names)[perm]
    labels = np.array(labels)[perm]

    train_names, val_names = names[:num_train], names[num_train:]
    train_labels, val_labels = labels[:num_train], labels[num_train:]

    return train_names, val_names, train_labels, val_labels

class NameDataset(Dataset):
    """Dataset of (encoded name, label id, name length) triples.

    Samples are sorted by name length, longest first, so that every
    consecutive batch drawn with shuffle=False has descending lengths —
    the ordering pack_padded_sequence expects by default.

    Fixes over the previous version: removed a duplicated ``self.name2idx``
    assignment, replaced the unstable ``np.argsort(...)[::-1]`` (arbitrary
    order among equal lengths) with a stable sort, and dropped the needless
    list -> numpy -> list round-trips.
    """

    def __init__(self, names, labels, idx2name, name2idx, idx2label, label2idx):
        self.idx2name = idx2name
        self.name2idx = name2idx
        self.idx2label = idx2label
        self.label2idx = label2idx

        # Stable sort by length, longest first; ties keep input order.
        order = sorted(range(len(names)), key=lambda i: len(names[i]), reverse=True)

        # Encode characters and labels as vocabulary ids.
        self.names = [[name2idx[c] for c in names[i]] for i in order]
        self.labels = [label2idx[labels[i]] for i in order]
        self.name_lens = [len(names[i]) for i in order]

    def __len__(self):
        return len(self.names)

    def __getitem__(self, idx):
        # Returns (char id list, label id, sequence length).
        return self.names[idx], self.labels[idx], self.name_lens[idx]

def collate_fn(batch):
    """Collate (name ids, label, length) triples into padded batch tensors.

    Args:
        batch: list of ``(name_id_list, label_id, name_len)`` tuples.

    Returns:
        padded name tensor of shape [max_len, B] (padding id 0),
        label tensor of shape [B], length tensor of shape [B].
    """
    names, labels, lengths = zip(*batch)

    # Variable-length id lists -> one tensor per sample, then time-major padding.
    name_tensors = [torch.tensor(seq, dtype=torch.long) for seq in names]
    padded_names = pad_sequence(name_tensors, batch_first=False, padding_value=0)

    label_tensor = torch.tensor(labels, dtype=torch.long)
    length_tensor = torch.tensor(lengths, dtype=torch.long)
    return padded_names, label_tensor, length_tensor

def test_dataset():
    """Smoke test: build the dataset pipeline and pull a single batch."""
    names, labels, idx2name, name2idx, idx2label, label2idx = preprocess()
    dataset = NameDataset(names, labels, idx2name, name2idx, idx2label, label2idx)
    loader = DataLoader(dataset, batch_size=4, shuffle=False, collate_fn=collate_fn)
    # Exercise one iteration only; the batch contents are not checked here.
    for batch in loader:
        break

class LSTMModel(torch.nn.Module):
    """Single-layer bidirectional LSTM classifier over character sequences.

    The final forward and backward hidden states are summed and projected
    to ``output_size`` logits.
    """

    def __init__(self, vocab_size, word_vec_dim, hidden_size, output_size):
        super().__init__()
        self.embedding = torch.nn.Embedding(vocab_size, word_vec_dim)
        self.rnn = torch.nn.LSTM(word_vec_dim, hidden_size, batch_first=False, bidirectional=True, num_layers=1)
        self.fc = torch.nn.Linear(hidden_size, output_size)

    def forward(self, x, x_len):
        '''
        :param x: [s, B] padded character-id batch (time-major)
        :param x_len: [B] true sequence lengths (must live on CPU)
        :return: [B, output_size] classification logits
        '''
        emb = self.embedding(x)  # [s, B, d]
        # enforce_sorted=False: previously the default (True) silently relied on
        # the DataLoader delivering length-sorted batches; packing now also
        # works for arbitrary batch order and is identical for sorted input.
        packed = pack_padded_sequence(emb, x_len, batch_first=False, enforce_sorted=False)
        output, (h, c) = self.rnn(packed)  # h: [2, B, hidden] (fwd, bwd)
        # Sum the two directions' final hidden states before projecting.
        logits = self.fc(h[0] + h[1])
        return logits

def train(model, train_dataloader, val_dataloader):
    """Train the classifier and report validation macro-F1 after each epoch.

    Uses cross-entropy loss with Adam at the module-level learning rate,
    for the module-level number of epochs.
    """
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    for epoch in range(epochs):
        model.train()
        for step, (batch_names, batch_labels, batch_lens) in enumerate(train_dataloader):
            batch_names = batch_names.to(device)
            batch_labels = batch_labels.to(device)
            # batch_lens is deliberately left on CPU: pack_padded_sequence
            # requires CPU lengths.

            logits = model(batch_names, batch_lens)
            loss = criterion(logits, batch_labels)

            # Standard backprop step.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print("epoch: {}, step: {}, loss: {:.4f}".format(epoch, step, loss.item()))

        # Evaluate on the held-out split at the end of every epoch.
        f1 = test(model, val_dataloader)
        print("validation F1 score: {:.4f}".format(f1))

def test(model, dataloader):
    """Evaluate the model on ``dataloader`` and return the macro-F1 score."""
    model.eval()

    golden_labels = []
    pred_labels = []
    with torch.no_grad():
        for batch_names, batch_labels, batch_lens in dataloader:
            batch_names = batch_names.to(device)
            batch_labels = batch_labels.to(device)

            # Predicted class = argmax over the logit dimension.
            logits = model(batch_names, batch_lens)
            predictions = torch.argmax(logits, dim=-1)

            pred_labels.extend(predictions.cpu().numpy().tolist())
            golden_labels.extend(batch_labels.cpu().numpy().tolist())

    return f1_score(golden_labels, pred_labels, average='macro')

if __name__ == '__main__':
    # Build the corpus and the character/label vocabularies.
    names, labels, idx2name, name2idx, idx2label, label2idx = preprocess()
    # test_dataset()
    # Embedding dim 50, LSTM hidden size 128; output size = number of languages.
    model = LSTMModel(len(idx2name), 50, 128, len(label2idx))
    model.to(device)

    train_names, val_names, train_labels, val_labels = split_data(names, labels)

    # NOTE: shuffle=False is load-bearing here — NameDataset pre-sorts samples
    # by length (longest first) so each consecutive batch has descending
    # lengths, as pack_padded_sequence expects by default.
    train_dataset = NameDataset(train_names, train_labels, idx2name, name2idx, idx2label, label2idx)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False, collate_fn=collate_fn)

    val_dataset = NameDataset(val_names, val_labels, idx2name, name2idx, idx2label, label2idx)
    val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, collate_fn=collate_fn)

    train(model, train_dataloader, val_dataloader)

    # print("finish")
