import os
import numpy as np 
from sklearn.metrics import precision_score, recall_score, f1_score, classification_report
import sklearn 
import warnings
import sklearn.exceptions
# Silence sklearn's UndefinedMetricWarning (raised e.g. when a class has no
# predicted samples) for cleaner console output.
# NOTE(review): numpy and the sklearn metric imports are unused in this
# script -- likely leftovers from a training/evaluation script.
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
import time

import torch
from torch.utils.data import Dataset,DataLoader
import torch.nn as nn
import torch.optim as optim

# Project-local LSTM-CRF tagger model definition.
from LSTM_CRF import NERLSTM_CRF

def load_dict(dict_path):
    """Load a newline-delimited vocabulary file into lookup tables.

    Each line of the file is one token; its 0-based line number becomes
    its integer id.

    Args:
        dict_path: path to a UTF-8 text file, one token per line.

    Returns:
        (token2id, id2token): a dict mapping token -> id, and its inverse.
    """
    vocab = {}
    # Use a context manager so the file handle is closed deterministically
    # (the original iterated over a bare open() and leaked the handle).
    with open(dict_path, 'r', encoding='utf-8') as f:
        for idx, line in enumerate(f):
            vocab[line.strip('\n')] = idx
    return vocab, {v: k for k, v in vocab.items()}


def convert_tokens_2_id(tokens_list, token2id_dict):
    """Map a sequence of tokens to their integer ids.

    Tokens missing from the vocabulary fall back to the id of the special
    'OOV' token; if 'OOV' itself is absent from the vocabulary, 0 is used.

    Args:
        tokens_list: iterable of tokens, e.g. ['a', 'b', 'c'].
        token2id_dict: mapping from token to integer id.

    Returns:
        list of integer ids, one per input token.
    """
    # A single dict.get with a precomputed fallback replaces the original's
    # redundant `in dict.keys()` membership test and loop-variable mutation.
    oov_id = token2id_dict.get('OOV', 0)
    return [token2id_dict.get(token, oov_id) for token in tokens_list]

# --- Reproducibility: seed the CPU RNG and every CUDA device RNG. ---
seed = 666666
torch.manual_seed(seed)            
torch.cuda.manual_seed(seed)       
torch.cuda.manual_seed_all(seed) 

# Vocabulary and tag-set lookup tables (token <-> id in both directions),
# loaded from files in the current working directory.
word2id, id2word = load_dict('word2id.txt')
tag2id, id2tag = load_dict('tag2id.txt')
# NOTE(review): empty dir/name means the checkpoint path below resolves to
# the current directory -- these look like placeholders to be filled in.
model_output_dir = ''
model_name = ''


# Inference device: GPU 0 when available, otherwise CPU.
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Hyperparameters must match the checkpoint being restored.
DROP_OUT = 0.2
EMBED_DIM = 100
HIDDEN_DIM = 200

t = time.time()
# Build the model skeleton, restore trained weights (loaded onto CPU first,
# so a GPU-trained checkpoint also restores on CPU-only machines), then move
# the model to the target device.
model = NERLSTM_CRF(EMBED_DIM, HIDDEN_DIM, DROP_OUT, word2id, tag2id)
model.load_state_dict(torch.load(os.path.join(model_output_dir, model_name), map_location='cpu'))
model.to(DEVICE)
# Switch to evaluation mode: without this, dropout (p=0.2) stays active at
# inference time and the predictions become non-deterministic.
model.eval()
print('finish load model !')
print(time.time() - t)

# NOTE(review): this rebinds the builtin `input`; kept as-is because the
# inference code further down reads this name.
input = input('input: ')

t = time.time()
# Map characters to ids via convert_tokens_2_id so out-of-vocabulary
# characters fall back to the 'OOV' id instead of raising KeyError
# (the original indexed word2id directly).
X_dev = torch.tensor(convert_tokens_2_id(list(input), word2id))
# Add a batch dimension: shape (1, seq_len).
X_dev = X_dev.reshape(1, len(X_dev))
print(X_dev, X_dev.shape)

def segment_result(sentence, seg_tag):
    """Reassemble a character sequence into words from BMES-style tags.

    Characters accumulate into the current word until a closing tag is
    seen: 'E' ends a multi-character word, 'S' marks a single-character
    word. Any other tag ('B', 'M', ...) continues the current word.

    Args:
        sentence: sequence of characters.
        seg_tag: per-character tag sequence, same length as `sentence`.

    Returns:
        list of segmented words; on a length mismatch, the error string
        'diff length between inputs' (kept for backward compatibility
        with existing callers).
    """
    if len(sentence) != len(seg_tag):
        return 'diff length between inputs'

    result = []
    word = ''
    for ch, tag in zip(sentence, seg_tag):
        word += ch
        if tag == 'E' or tag == 'S':
            result.append(word)
            word = ''
    # Robustness fix: a tag sequence ending mid-word ('B'/'M') used to drop
    # its characters silently; flush the trailing partial word instead.
    if word:
        result.append(word)
    return result

# Inference: run the tagger without gradient tracking, decode tag ids back
# to tag strings, segment the input, and print the result and elapsed time.
with torch.no_grad():
    X_dev = X_dev.to(DEVICE)
    # NOTE(review): model(X_dev) appears to return per-sentence sequences of
    # tag ids (CRF decode); predict[0] is the single batch element -- confirm
    # against NERLSTM_CRF.forward.
    predict = model(X_dev)
    predict = [id2tag[i] for i in predict[0]]
    final = segment_result(input, predict)
    print(final)
print(time.time() - t)