import torch
from sklearn.metrics import f1_score
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataset import T_co
from tqdm import tqdm

# Corpus and label file locations.  NOTE(review): the loading code further
# down hard-codes 'data/train.txt' / 'data/test.txt' instead of these
# constants — confirm which paths are the intended ones.
TRAIN_FILE = './data/train_corpus.txt'
TRAIN_LABEL_FILE = './data/train_label.txt'
TEST_FILE = './data/test_corpus.txt'
TEST_LABEL_FILE = './data/test_label.txt'

# Sentinel tags bracketing every sequence inside the CRF layer.
START_TAG = "<START>"
STOP_TAG = "<STOP>"

# Training hyper-parameters.
BATCH_SIZE = 5
EMBEDDING_DIM = 50  # word embedding dimension
HIDDEN_DIM = 50  # total Bi-LSTM hidden size, split across the two directions
EPOCH = 1
# Prefer the first CUDA device when available, otherwise fall back to CPU.
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


def argmax(vec):
    """Return, as a Python int, the column of the largest entry in a 1 x N tensor."""
    _, best_idx = vec.max(dim=1)
    return best_idx.item()


def log_sum_exp(vec):
    """Numerically stable log(sum(exp(vec))) over a 1 x N score row.

    Returns a 0-dim tensor on the same device as ``vec``.  Replaces the
    previous hand-rolled max-shift implementation, which duplicated
    ``torch.logsumexp`` and forced redundant ``.to(DEVICE)`` transfers
    (every call site already passes a tensor on the working device).
    """
    # squeeze(0) turns the (1,)-shaped result into the 0-dim tensor the
    # original returned, so downstream .view(1) / broadcasting is unchanged.
    return torch.logsumexp(vec, dim=1).squeeze(0)


def build_corpus(split, data_dir):
    """Read a CoNLL-style corpus (one "word tag" pair per line, sentences
    separated by blank lines) and build the index maps.

    Args:
        split: one of 'train', 'test', 'dev' (sanity check only).
        data_dir: path to the corpus file.

    Returns:
        (word_lists, tag_lists, word_2_index, tag_2_index) where each map
        reserves index 0 for '<PAD>' and the last index for '<UNK>'.

    Fixes over the previous version:
      * '<PAD>' used to be assigned index 0 *after* the map was built, so it
        collided with the real token already at index 0.
      * A file without a trailing blank line used to drop its final sentence.
      * The lists were sorted twice only to feed the map builder; the index
        values are arbitrary, so the sort was dead work and is removed.
    """
    assert split in ['train', 'test', 'dev']
    word_lists = []
    tag_lists = []
    word_list = []
    tag_list = []
    with open(data_dir, 'r', encoding='utf-8') as f:
        for line in f:
            if line != '\n':
                word, tag = line.strip('\n').split()
                word_list.append(word)
                tag_list.append(tag)
            else:
                word_lists.append(word_list)
                tag_lists.append(tag_list)
                word_list = []
                tag_list = []
    # Flush the last sentence when the file does not end with a blank line.
    if word_list:
        word_lists.append(word_list)
        tag_lists.append(tag_list)

    word_2_index = build_map(word_lists, reserve_pad=True)
    tag_2_index = build_map(tag_lists, reserve_pad=True)

    # '<UNK>' (out-of-vocabulary fallback) always gets the last index.
    word_2_index['<UNK>'] = len(word_2_index)
    tag_2_index['<UNK>'] = len(tag_2_index)

    return word_lists, tag_lists, word_2_index, tag_2_index


def build_map(lists, reserve_pad=False):
    """Assign consecutive indices to the distinct elements of nested lists.

    Args:
        lists: list of lists of hashable elements (words or tags).
        reserve_pad: when True, index 0 is pre-assigned to '<PAD>' so no
            real element can collide with the padding index.  Defaults to
            False, preserving the original call signature and behaviour.

    Returns:
        dict mapping each distinct element to its index, in first-seen order.
    """
    maps = {'<PAD>': 0} if reserve_pad else {}
    for li in lists:
        for e in li:
            if e not in maps:
                maps[e] = len(maps)
    return maps


class CorpusDataset(Dataset):
    """Dataset of parallel (word, tag) sequences with a collate function that
    right-pads every sequence in a batch to the batch's maximum length.

    Changes over the previous version: the stray ``pass`` in ``__init__`` and
    the fragile ``T_co`` return annotation (removed from recent torch) are
    gone, and ``__getitem__`` maps out-of-vocabulary words to '<UNK>' instead
    of raising ``KeyError``.
    """

    def __init__(self, word_lists, tag_lists, word_index, tag_index):
        """
        Args:
            word_lists: list of tokenised sentences (lists of word strings).
            tag_lists: parallel list of per-token tag strings.
            word_index: word -> int map; should contain '<PAD>' and '<UNK>'.
            tag_index: tag -> int map; should contain '<PAD>'.
        """
        self.word_lists = word_lists
        self.tag_lists = tag_lists
        self.word2index = word_index
        self.tag2index = tag_index

    def __getitem__(self, index):
        """Return (word_id_list, tag_id_list) for sentence ``index``."""
        word_list = self.word_lists[index]
        tag_list = self.tag_lists[index]

        # Unseen words fall back to '<UNK>' when the map provides one;
        # otherwise keep the original strict-lookup behaviour.
        unk = self.word2index.get('<UNK>')
        if unk is None:
            word_index = [self.word2index[w] for w in word_list]
        else:
            word_index = [self.word2index.get(w, unk) for w in word_list]
        # Tags form a closed set, so a miss here is a genuine data error.
        tag_index = [self.tag2index[t] for t in tag_list]
        return word_index, tag_index

    def __len__(self):
        assert len(self.word_lists) == len(self.tag_lists)
        return len(self.word_lists)

    def pro_batch_data(self, batch_data):
        """DataLoader ``collate_fn``: pad and tensorise one batch.

        Args:
            batch_data: iterable of (word_id_list, tag_id_list) pairs.

        Returns:
            (data, tags): two (batch, max_len) int64 tensors, right-padded
            with the respective '<PAD>' indices.
        """
        # Same selection rule as the module-level DEVICE constant, computed
        # locally so the collate function has no hidden global dependency.
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        seqs, tag_seqs, lengths = [], [], []
        for words, word_tags in batch_data:
            seqs.append(words)
            tag_seqs.append(word_tags)
            lengths.append(len(words))

        max_len = max(lengths)
        word_pad = self.word2index["<PAD>"]
        tag_pad = self.tag2index["<PAD>"]
        seqs = [s + [word_pad] * (max_len - len(s)) for s in seqs]
        tag_seqs = [t + [tag_pad] * (max_len - len(t)) for t in tag_seqs]

        data = torch.tensor(seqs, dtype=torch.int64, device=device)
        tags = torch.tensor(tag_seqs, dtype=torch.long, device=device)
        return data, tags


class BiLSTMCRF(nn.Module):
    """Bi-LSTM encoder followed by a linear-chain CRF for sequence tagging.

    The model processes ONE 1-D sequence of word ids at a time; ``forward``
    flattens whatever it receives into a single sequence before decoding.
    """

    def __init__(self, vocab_size, tag_to_index, embedding_dim, hidden_dim):
        """
        Args:
            vocab_size: number of rows in the word-embedding table.
            tag_to_index: tag -> id map; must contain START_TAG and STOP_TAG.
            embedding_dim: word embedding dimension.
            hidden_dim: total Bi-LSTM output size (halved per direction).
        """
        super(BiLSTMCRF, self).__init__()
        self.embedding_dim = embedding_dim  # word embedding dim
        self.hidden_dim = hidden_dim  # Bi-LSTM hidden dim
        self.vocab_size = vocab_size
        self.tag_to_ix = tag_to_index
        self.target_size = len(tag_to_index)

        self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
        # hidden_dim // 2 per direction so the concatenated output is hidden_dim.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,
                            num_layers=1, bidirectional=True)
        # Projects LSTM features to per-tag emission scores.
        self.hidden2tag = nn.Linear(hidden_dim, self.target_size)

        # transitions[i, j] = score of transitioning TO tag i FROM tag j.
        self.transitions = nn.Parameter(
            torch.randn(self.target_size, self.target_size))

        # Forbid transitioning into START and out of STOP.
        self.transitions.data[tag_to_index[START_TAG], :] = -10000
        self.transitions.data[:, tag_to_index[STOP_TAG]] = -10000

        self.hidden = self.init_hidden()

    def init_hidden(self):
        # Fresh (h0, c0) for a 1-layer bidirectional LSTM with batch size 1.
        # NOTE(review): random (rather than zero) initial states make repeated
        # decodes of the same input slightly non-deterministic — confirm intended.
        return (torch.randn(2, 1, self.hidden_dim // 2).to(DEVICE),
                torch.randn(2, 1, self.hidden_dim // 2).to(DEVICE))

    def _get_lstm_features(self, sentence):
        """Emission scores for a 1-D id tensor; shape (len(sentence), target_size)."""
        self.hidden = self.init_hidden()
        embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)
        lstm_out, self.hidden = self.lstm(embeds, self.hidden)
        lstm_out = lstm_out.view(len(sentence), self.hidden_dim)
        lstm_feats = self.hidden2tag(lstm_out)
        return lstm_feats

    def _score_sentence(self, feats, sentence_tag):
        """CRF score of the gold tag path (START prepended, STOP appended)."""
        sentence_score = torch.zeros(1).to(DEVICE)
        sentence_tag = torch.cat([torch.tensor([self.tag_to_ix[START_TAG]], dtype=torch.long).to(DEVICE), sentence_tag])
        for i, feat in enumerate(feats):
            # transition(to=tag[i+1], from=tag[i]) + emission score of tag[i+1].
            sentence_score = sentence_score + self.transitions[sentence_tag[i + 1], sentence_tag[i]] + \
                             feat[sentence_tag[i + 1]]
        # Close the path with the transition into STOP.
        sentence_score = sentence_score + self.transitions[self.tag_to_ix[STOP_TAG], sentence_tag[-1]]
        return sentence_score

    def _forward_alg(self, feats):
        """Log partition function: log-sum-exp over the scores of ALL tag paths."""
        # Only the START tag has a finite initial score.
        init_alphas = torch.full((1, self.target_size), -10000.).to(DEVICE)
        init_alphas[0][self.tag_to_ix[START_TAG]] = 0.
        previous = init_alphas

        for obs in feats:
            # The forward tensors at this timestamp
            alphas_t = []
            for next_tag in range(self.target_size):
                # The emission score of next_tag is the same for every predecessor.
                emit_score = obs[next_tag].view(1, -1).expand(1, self.target_size).to(DEVICE)
                trans_score = self.transitions[next_tag].view(1, -1)
                next_tag_var = previous.to(DEVICE) + trans_score.to(DEVICE) + emit_score.to(DEVICE)
                alphas_t.append(log_sum_exp(next_tag_var).view(1))
            previous = torch.cat(alphas_t).view(1, -1)
        terminal_var = previous + self.transitions[self.tag_to_ix[STOP_TAG]]
        scores = log_sum_exp(terminal_var)
        return scores.to(DEVICE)

    def _viterbi_decode(self, feats):
        """Best-scoring tag path via Viterbi; returns (path_score, tag_id_list).

        Deliberately computed on CPU (.cpu() throughout) regardless of DEVICE.
        """
        back_pointers = []

        # Only START is reachable before the first observation.
        init_vi_vars = torch.full((1, self.target_size), -10000.).cpu()
        init_vi_vars[0][self.tag_to_ix[START_TAG]] = 0

        previous = init_vi_vars
        for obs in feats:
            back_ptr_t = []  # best predecessor tag for each tag at this step
            viterbi_vars_t = []  # best running score for each tag at this step
            for next_tag in range(self.target_size):
                # The emission score does not depend on the predecessor, so it
                # is added after the max (outside this comparison).
                next_tag_var = previous.cpu() + self.transitions[next_tag].cpu()
                best_tag_id = argmax(next_tag_var)
                back_ptr_t.append(best_tag_id)
                viterbi_vars_t.append(next_tag_var[0][best_tag_id].view(1))

            previous = (torch.cat(viterbi_vars_t).cpu() + obs.cpu()).view(1, -1)
            back_pointers.append(back_ptr_t)

        # Transition into STOP closes every candidate path.
        terminal_var = previous.cpu() + self.transitions[self.tag_to_ix[STOP_TAG]].cpu()
        best_tag_id = argmax(terminal_var)
        path_score = terminal_var[0][best_tag_id]

        # Walk the back pointers to recover the best path (built reversed).
        best_path = [best_tag_id]
        for back_ptr_t in reversed(back_pointers):
            best_tag_id = back_ptr_t[best_tag_id]
            best_path.append(best_tag_id)

        start = best_path.pop()
        assert start == self.tag_to_ix[START_TAG]  # Sanity check
        best_path.reverse()
        return path_score, best_path

    def neg_log_likelihood(self, sentence, all_tags):
        """Training loss: log-partition minus gold-path score.

        Always >= 0, since the partition sum includes the gold path.
        """
        feats = self._get_lstm_features(sentence)
        forward_score = self._forward_alg(feats)
        gold_score = self._score_sentence(feats, all_tags)
        return forward_score - gold_score

    def forward(self, sentence):
        """Decode ``sentence``; returns (viterbi_score, best_tag_id_list).

        NOTE(review): reshape(-1) concatenates a batched input into ONE long
        sequence, so batch elements share CRF transitions across their
        boundaries — confirm this is intended.
        """
        sentence = sentence.reshape(-1)
        lstm_feats = self._get_lstm_features(sentence)
        vi_score, tag_seq = self._viterbi_decode(lstm_feats)
        return vi_score, tag_seq


# Load the corpora and build index maps.  NOTE(review): these paths bypass the
# TRAIN_FILE/TEST_FILE constants defined at the top of the file — confirm
# which set of paths is the intended one.
train_data, train_tag, word2index, tag2index = build_corpus('train', 'data/train.txt')
dev_data, dev_tag, word2index_dev, tag2index_dev = build_corpus("dev", "data/test.txt")

# Fixed tag inventory shared by both datasets and the CRF.  The tag2index /
# tag2index_dev maps returned above are not used — this hand-written map is
# what the model sees.
tag_to_ix = {'O': 0,
             'B-LOC': 1,
             'I-LOC': 2,
             'B-ORG': 3,
             'I-ORG': 4,
             'B-PER': 5,
             'I-PER': 6,
             START_TAG: 7,
             STOP_TAG: 8,
             '<PAD>': 9
             }

# NOTE(review): shuffle=False means training batches arrive in corpus order —
# confirm this is intentional.
train_dataset = CorpusDataset(train_data, train_tag, word2index, tag_to_ix)
train_dataloader = DataLoader(train_dataset, BATCH_SIZE, shuffle=False, collate_fn=train_dataset.pro_batch_data)

# NOTE(review): dev sentences are indexed with word2index_dev, but the
# embedding table below is sized from the *training* word2index — a dev index
# can exceed the embedding size and crash; verify the vocabularies agree.
dev_dataset = CorpusDataset(dev_data, dev_tag, word2index_dev, tag_to_ix)
dev_dataloader = DataLoader(dev_dataset, BATCH_SIZE, shuffle=False, collate_fn=dev_dataset.pro_batch_data)

model = BiLSTMCRF(len(word2index), tag_to_ix, EMBEDDING_DIM, HIDDEN_DIM).to(DEVICE)

optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)

# Load a previously trained model if available; otherwise train from scratch.
try:
    # map_location lets a checkpoint saved on GPU load on a CPU-only host.
    model.load_state_dict(torch.load('bilstm_crf.pth', map_location=DEVICE))
except (FileNotFoundError, RuntimeError):
    print('read pth file Error, start to train a new model')
    for epoch in range(EPOCH):
        with tqdm(total=len(train_dataloader), ncols=150) as _tqdm:
            _tqdm.set_description(f'epoch:{epoch+1}/{EPOCH}')
            total_loss = 0
            for batch_num, (sentences, tags) in enumerate(train_dataloader):
                # NOTE(review): flattening concatenates the whole batch into
                # one long sequence, so the CRF scores transitions across
                # sentence boundaries — confirm this is intended.
                sentences = sentences.reshape(-1)
                tags = tags.reshape(-1)
                model.zero_grad()
                # The loss already lives on DEVICE; the previous unconditional
                # .cuda() crashed on CPU-only machines.
                loss = model.neg_log_likelihood(sentences, tags)
                loss.backward()
                optimizer.step()

                total_loss += loss.item()

                _tqdm.set_postfix(crf_loss=f'{total_loss / (batch_num + 1)}')
                _tqdm.update(1)

    torch.save(model.state_dict(), './bilstm_crf.pth')

# Evaluation: Viterbi-decode the dev set and report micro-F1.
model.eval()
all_pre = []
all_tag = []
pad_index = tag_to_ix['<PAD>']
# torch.no_grad() skips autograd bookkeeping during decoding; the doubled
# progress bar (tqdm() wrapped inside a `with tqdm(...)`) is gone.
with torch.no_grad(), tqdm(total=len(dev_dataloader), ncols=100) as _tqdm:
    for dev_sentences, dev_tags in dev_dataloader:
        dev_pre_score, dev_pre_tag = model.forward(dev_sentences)
        dev_tags_flat = dev_tags.detach().cpu().reshape(-1).tolist()
        # Skip padding positions so they do not inflate the F1 score;
        # predictions and gold tags are aligned over the same flattened batch.
        for pre, gold in zip(dev_pre_tag, dev_tags_flat):
            if gold != pad_index:
                all_pre.append(pre)
                all_tag.append(gold)
        _tqdm.update(1)
score = f1_score(all_tag, all_pre, average="micro")
print("f1_score:", score)
