"""
Code adapted from:
https://pytorch.org/tutorials/beginner/nlp/advanced_tutorial.html
"""
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# NOTE(review): the shebang and encoding declarations above are inert —
# Python/the OS only honor them on the first (and second) line of a file;
# here they sit below the module docstring.
'''
@File    :   main.py
@Time    :   2022-03-01 10:59:22
@Author  :   GuoLiuFang
@Version :   0.1
@Contact :   guoliufangking@gmail.com
@License :   (C)Copyright 2018-2022, RandomMatrix
@Desc    :   None
'''
import logging
# Log to both a file ('log.log' in the current working directory) and the
# console.  Handlers are created up front and registered in a single
# basicConfig call; DEBUG level means everything is emitted.
file_handler = logging.FileHandler(filename='log.log')
stdout_handler = logging.StreamHandler()
logging.basicConfig(
    level=logging.DEBUG,
    handlers=[file_handler, stdout_handler],
    format='%(asctime)s - %(processName)s - %(name)s - %(relativeCreated)d - %(threadName)s - %(levelname)s -- %(message)s'
)

# import other libs

import torch
import torch.optim as optim

from constants import Const
from bilstm_crf import BiLSTM_CRF

# For reproducibility: seed torch's global RNG so parameter initialization
# (and any other torch randomness) is identical across runs.
torch.manual_seed(1)
from data import get_express_dataloader
import os
def get_word_and_tag_vocab(training_data):
    """Build word->index and tag->index vocabularies from a dataset.

    Special tokens are registered first so that their ids keep the fixed
    values defined in ``Const``; every previously unseen word/tag then
    receives the next free index, in order of first appearance.

    :param training_data: iterable of ``(sentence, tags)`` pairs, where
        ``sentence`` and ``tags`` are parallel sequences of strings.
    :return: tuple ``(word_to_ix, tag_to_ix)`` of dicts mapping
        token -> integer id.
    """
    # NOTE(review): assumes Const.*_ID values are 0..3 / 0..2 so that
    # len(dict) yields the next free id — confirm against constants.py.
    word_to_ix = {
        Const.UNK_TOKEN: Const.UNK_ID,
        Const.PAD_TOKEN: Const.PAD_ID,
        Const.BOS_TOKEN: Const.BOS_ID,
        Const.EOS_TOKEN: Const.EOS_ID,
    }
    tag_to_ix = {
        Const.PAD_TAG_TOKEN: Const.PAD_TAG_ID,
        Const.BOS_TAG_TOKEN: Const.BOS_TAG_ID,
        Const.EOS_TAG_TOKEN: Const.EOS_TAG_ID,
    }

    for sentence, tags in training_data:
        for word, tag in zip(sentence, tags):
            # setdefault only inserts when the key is absent; the default
            # (current dict size) is the next free index, exactly mirroring
            # the explicit "if not in" membership check.
            word_to_ix.setdefault(word, len(word_to_ix))
            tag_to_ix.setdefault(tag, len(tag_to_ix))

    return word_to_ix, tag_to_ix


def prepare_sequence(seq, stoi):
    """Encode *seq* (an iterable of tokens) as a 1-D ``LongTensor``.

    :param seq: sequence of tokens (e.g. words).
    :param stoi: mapping token -> integer id; raises ``KeyError`` on
        out-of-vocabulary tokens, same as direct indexing.
    :return: ``torch.Tensor`` of dtype ``torch.long``.
    """
    ids = list(map(stoi.__getitem__, seq))
    return torch.tensor(ids, dtype=torch.long)


def ids_to_tags(seq, itos):
    """Decode a sequence of tag ids back into tag strings.

    :param seq: iterable of integer tag ids.
    :param itos: index -> tag lookup (list or mapping).
    :return: list of tags, one per id, in order.
    """
    return list(map(itos.__getitem__, seq))


if __name__ == "__main__":
# Get the current working directory
    cwd = os.getcwd()

    # Print the current working directory
    print(f"当前目录是: {cwd}")

    train_dataloader, train_dataset = get_express_dataloader("../PaddleNLP_NER_CRF/express_ner/train.txt", batch_size=4)
    dev_dataloader, _ = get_express_dataloader("../PaddleNLP_NER_CRF/express_ner/dev.txt", batch_size=4,train=False, sequence_vocab=train_dataset.sequence_vocab, tag_vocab=train_dataset.tag_vocab)
    test_dataloader, _ = get_express_dataloader("../PaddleNLP_NER_CRF/express_ner/test.txt",batch_size=4 ,train=False, sequence_vocab=train_dataset.sequence_vocab, tag_vocab=train_dataset.tag_vocab)
    batch_x_seqs, batch_y_tags, batch_mask = next(iter(dev_dataloader))

    # see bilstm_crf.py
    model = BiLSTM_CRF(len(train_dataset.sequence_vocab), len(train_dataset.tag_vocab))
    optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)

    # Check predictions before training
    print('Predictions before training:')
    with torch.no_grad():
        scores, seqs = model(batch_x_seqs, mask=batch_mask)
        for score, seq in zip(scores, seqs):
            str_seq = " ".join(ids_to_tags(seq, train_dataset.tag_vocab.itos))
            print('%.2f: %s' % (score.item(), str_seq))
    logging.info("开始训练了")
    # Make sure prepare_sequence from earlier in the LSTM section is loaded
    for epoch in range(300):  # normally you would NOT do 300 epochs, it is toy data
        logging.info(f"这是第{epoch}epoch")
        logging.info("*"*100)
        for it, (x_seqs, y_tags, mask) in enumerate(train_dataloader):
        
            # Step 1. Remember that Pytorch accumulates gradients.
            # We need to clear them out before each instance
            model.zero_grad()

            # Step 2. Get our inputs ready for the network, that is,
            # turn them into Tensors of word indices.
            sentence_in = x_seqs
            targets = y_tags

            # Step 3. Run our forward pass.
            loss = model.loss(sentence_in, targets, mask=mask)
            logging.info(f"第{epoch}epoch的第{it}次迭代，loss的type是{loss.shape},loss的值是{loss}")
            # Step 4. Compute the loss, gradients, and update the parameters by
            # calling optimizer.step()
            loss.backward()
            optimizer.step()

    # Check predictions after training
    print('Predictions after training:')
    with torch.no_grad():
        scores, seqs = model(batch_x_seqs, mask=batch_mask)
        for score, seq in zip(scores, seqs):
            str_seq = " ".join(ids_to_tags(seq, train_dataset.tag_vocab.itos))
            print('%.2f: %s' % (score.item(), str_seq))
