import os
import numpy as np 
from sklearn.metrics import precision_score, recall_score, f1_score, classification_report
import sklearn 
import warnings
import sklearn.exceptions
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)

import torch
from torch.utils.data import Dataset,DataLoader
import torch.nn as nn
import torch.optim as optim

from model.LSTM_CRF import NERLSTM_CRF

# Set random seeds for reproducibility on CPU and on all CUDA devices.
seed = 666666
torch.manual_seed(seed)            
torch.cuda.manual_seed(seed)       
torch.cuda.manual_seed_all(seed) 

def load_dict(dict_path):
    """Load a newline-delimited vocabulary file.

    Each line holds one token; the token's 0-based line number becomes
    its integer id.

    Args:
        dict_path: path to a UTF-8 text file, one token per line.

    Returns:
        A (token2id, id2token) pair of dicts.
    """
    vocab = {}
    # Context manager closes the file deterministically; the original
    # opened the file in the for-loop header and relied on GC to close it.
    with open(dict_path, 'r', encoding='utf-8') as f:
        for idx, line in enumerate(f):
            vocab[line.strip('\n')] = idx
    return vocab, {v: k for k, v in vocab.items()}

def convert_tokens_2_id(tokens_list, token2id_dict):
    """Map each token to its integer id, falling back to the 'OOV' id.

    Args:
        tokens_list: iterable of tokens, e.g. ['a', 'b', 'c'].
        token2id_dict: mapping from token to integer id; must contain an
            'OOV' entry if any token may be out of vocabulary.

    Returns:
        A list of integer ids, same length and order as tokens_list.
    """
    # Membership test on the dict directly (no `.keys()`) and a
    # comprehension replace the manual append loop. The conditional keeps
    # the original behavior: token2id_dict['OOV'] is only looked up (and
    # may only raise KeyError) when an unknown token actually occurs.
    return [token2id_dict[t] if t in token2id_dict else token2id_dict['OOV']
            for t in tokens_list]

# Hyper parameters
BATCH_SIZE = 64
DEVICE = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu') # GPU index chosen per server availability
LR = 0.001  
DROP_OUT = 0.2
EMBED_DIM = 100
HIDDEN_DIM = 200
WEIGTH_DECAY = 1e-5  # L2 regularization coefficient (default 1e-5); NOTE(review): name is a typo for WEIGHT_DECAY, kept because it is referenced below
EPOCH = 3
BEST_F1 = 0

# Datasets serialized with torch.save; NERDataset below indexes each item
# as a pair, so each element is presumably (token_id_list, tag_id_list) --
# confirm against the preprocessing script.
train = torch.load('deep learning/train_data')
test = torch.load('deep learning/test_data')

word2id, id2word = load_dict('deep learning/word2id.txt')
tag2id, id2tag = load_dict('deep learning/tag2id.txt')
model_output_dir = 'deep learning/BILSTM_CRF/save_dict'
model_pth_name = 'BEST_F1_%s.pth'
i=0
# Pick the first unused checkpoint filename (BEST_F1_0.pth, BEST_F1_1.pth, ...)
# so checkpoints from earlier runs are never overwritten.
while True:
    if os.path.exists(os.path.join(model_output_dir, model_pth_name%i)):
        i+=1
    else:
        model_pth_name = model_pth_name%i
        break


# for idx, (x, y) in enumerate(train):
#     x_train[idx] = convert_tokens_2_id(x, word2id)
#     y_train[idx] = convert_tokens_2_id(y, tag2id)
# for idx, (x, y) in enumerate(zip(x_test, y_test)):
#     x_test[idx] = convert_tokens_2_id(x, word2id)
#     y_test[idx] = convert_tokens_2_id(y, tag2id)

# create Dataset
class NERDataset(Dataset):
    """Thin Dataset wrapper over an indexable sequence of (tokens, tags) pairs."""

    def __init__(self, data):
        self.data = data

    def __getitem__(self, index):
        sample = self.data[index]
        return sample[0], sample[1]

    def __len__(self):
        return len(self.data)

# DataLoader collate_fn: pad each batch to its own max sequence length
def _pad_sequences(seqs):
    """Right-pad a batch of integer sequences with 0 into one LongTensor.

    Args:
        seqs: non-empty sequence of variable-length integer sequences.

    Returns:
        LongTensor of shape (len(seqs), max_len), zero-padded on the right.
        NOTE(review): assumes id 0 is a safe padding index for both words
        and tags -- confirm against the vocab/tag dictionaries.
    """
    max_len = max(len(s) for s in seqs)
    out = torch.zeros(len(seqs), max_len).long()
    for row, seq in enumerate(seqs):
        # seq is copied whole; the original's `s[:end]` slice with
        # end == len(s) was a redundant no-op.
        out[row, :len(seq)] = torch.LongTensor(seq)
    return out

def padding(data):
    """DataLoader collate_fn: unzip (src, tgt) pairs and pad each side.

    The src and tgt padding logic was duplicated verbatim in the original;
    both sides now share _pad_sequences.

    Args:
        data: list of (src_ids, tgt_ids) pairs from NERDataset.

    Returns:
        (src_pad, tgt_pad) LongTensors, each zero-padded to its own
        batch-wise maximum length.
    """
    src, tgt = zip(*data)
    return _pad_sequences(src), _pad_sequences(tgt)


train_dataset = NERDataset(train)
test_dataset = NERDataset(test)


# Batches are padded per-batch by the `padding` collate_fn.
# NOTE(review): shuffling the test loader is unnecessary (metrics are
# aggregated over all tokens), though harmless.
train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True,collate_fn=padding)
test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=True,collate_fn=padding)


# Build the BiLSTM-CRF model and move it to the selected device.
model = NERLSTM_CRF(EMBED_DIM, HIDDEN_DIM , DROP_OUT, word2id, tag2id)
# model.load_state_dict(torch.load('./save_dict/BEST_F1_1030.pth',map_location='cpu'))
# print('finish load model !')
model.to(DEVICE)

# Adam with L2 weight decay (WEIGTH_DECAY constant defined above).
optimizer = optim.Adam(model.parameters(), lr=LR, weight_decay=WEIGTH_DECAY)


for epoch in range(EPOCH):

    # ---- training phase ----
    model.train()
    for index, (X,y) in enumerate(train_dataloader):
        optimizer.zero_grad()
        X = X.to(DEVICE)
        y = y.to(DEVICE)

        # CRF negative log-likelihood loss -- presumably summed over the
        # batch; confirm against NERLSTM_CRF.log_likelihood.
        loss = model.log_likelihood(X,y)
        loss.backward()
        
        # Gradient clipping to stabilize LSTM training.
        torch.nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=10)

        optimizer.step()
        if index % 10 == 0:
            # NOTE(review): dividing by BATCH_SIZE is inexact for the last
            # (possibly smaller) batch, so the printed per-sample loss is
            # approximate.
            print('epoch:{} -----step:{} -------loss:{}'.format(epoch, index, loss.item()/BATCH_SIZE))
    
    # ---- evaluation phase (after every epoch) ----
    aver_loss = 0
    preds, labels = [], []

    model.eval()
    with torch.no_grad():

        for index, (X_dev,y_dev) in enumerate(test_dataloader):
            X_dev = X_dev.to(DEVICE)
            y_dev = y_dev.to(DEVICE)
            # model(X_dev) presumably returns a list of decoded tag-id
            # sequences (Viterbi paths) -- confirm against NERLSTM_CRF.
            predict = model(X_dev)
            # CRF loss on the dev batch
            loss = model.log_likelihood(X_dev, y_dev)
            aver_loss += loss.item()
            labels_list = y_dev.cpu().tolist()
            # Flatten predictions and gold labels into token-level lists.
            # NOTE(review): gold rows include padding zeros while predicted
            # rows may not -- verify the lengths line up before trusting
            # the metrics below.
            for i in range(len(predict)):
                preds += predict[i]
            for j in range(len(labels_list)):
                labels += labels_list[j]
                
           
        # Same caveat as the training print: the last batch may be smaller
        # than BATCH_SIZE, so this average is approximate.
        aver_loss /= (len(test_dataloader) * BATCH_SIZE)

        print('val loss：{}'.format(aver_loss))

        # Token-level metrics, macro-averaged over tag classes.
        precision = precision_score(labels, preds, average='macro')
        recall = recall_score(labels, preds, average='macro')
        f1 = f1_score(labels, preds, average='macro')
        report = classification_report(labels, preds)
        print(report)
        # Checkpoint whenever macro-F1 improves on the dev set.
        if f1>BEST_F1:
            BEST_F1 = f1
            torch.save(model.state_dict(), os.path.join(model_output_dir, model_pth_name))   # checkpoint path can be customized as needed



print('ALL DONE!!!')
print('BEST_F1:{}'.format(BEST_F1))