


import time
import torch
import random
import numpy as np

from transformers import BertTokenizer
from transformers import BertForTokenClassification
from torch.utils.data import TensorDataset, DataLoader, random_split, Dataset
from sklearn.metrics import precision_score, recall_score, f1_score, classification_report

# Pin every RNG source so evaluation runs are reproducible.
seed = 42
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True  # trade cuDNN autotuning speed for determinism

# Which CUDA index to pick depends on current GPU utilisation on the server.
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# DEVICE = torch.device('cpu')
# Directory holding the fine-tuned BERT checkpoint and its tokenizer files.
model_path = 'save_model/bert_model_2'

def convert_seg_result_to_bmes(seg_result, tag2id):
    """Convert a word-segmentation result to a flat list of BMES tag ids.

    Args:
        seg_result: sequence of segmented words (strings).
        tag2id: mapping from tag character ('B'/'M'/'E'/'S') to integer id.

    Returns:
        One tag id per character of the concatenated words: a single-char
        word is 'S'; a longer word is 'B', (len-2) * 'M', 'E'.
    """
    tag_chars = []
    for word in seg_result:
        n = len(word)
        if n == 1:
            tag_chars.append('S')
        elif n > 1:
            tag_chars.append('B' + 'M' * (n - 2) + 'E')
        # n == 0 contributes no characters (original appended '', joined away)
    return [tag2id[c] for c in ''.join(tag_chars)]

def load_dict(dict_path):
    """Load a newline-delimited vocabulary file.

    Each line is one entry; its 0-based line number becomes its id.

    Args:
        dict_path: path to a UTF-8 text file, one entry per line.

    Returns:
        Tuple (entry -> id dict, id -> entry dict). If the file contains
        duplicate entries, the last occurrence wins in both directions
        (same as the original incremental-assignment behavior).
    """
    # 'with' guarantees the file handle is closed even on error;
    # the original left the handle open (resource leak).
    with open(dict_path, 'r', encoding='utf-8') as f:
        vocab = {line.strip('\n'): i for i, line in enumerate(f)}
    return vocab, {v: k for k, v in vocab.items()}

    
# Vocabulary and tag-set lookups; each id is the entry's line number in its file.
word2id, id2word = load_dict('../word2id.txt')
tag2id, id2tag = load_dict('../tag2id.txt')


# NOTE(review): torch.load unpickles arbitrary Python objects -- only load trusted files.
test_data = torch.load('../test_data')
# Helper below maps id sequences back to token strings.
def convert_id_back_words(data, id2word):
    """Map a sequence of token ids back to their surface tokens.

    Raises KeyError if an id is absent from `id2word` (same as original).
    """
    return list(map(id2word.__getitem__, data))
# Rebuild token strings from ids; drop examples whose label sequence would
# exceed BERT's 512-position input limit.
test = [[convert_id_back_words(i[0], id2word),i[1]] for i in test_data if len(i[1])<512]



# f = open('../../data/segment_sentence_data/nlpcc2016-wordseg-dev.txt', 'r', encoding='utf-8')
# test = [i.strip('\n').split(' ') for i in f.readlines()]
# f.close()
# test = [[list(''.join(i)), convert_seg_result_to_bmes(i, tag2id)] for i in test]


# Dataset wrapper pairing token-id sequences with their label-id sequences.
class create_Dataset(Dataset):
    """Holds tokenized inputs and labels for one split.

    Tokens are converted to ids at construction time via the module-level
    `tokenizer` (must already be loaded when this class is instantiated).
    """

    def __init__(self, data):
        # data: iterable of [token_list, label_id_list] pairs.
        self.x = [tokenizer.convert_tokens_to_ids(sample[0]) for sample in data]
        self.y = [sample[1] for sample in data]

    def __getitem__(self, index):
        return self.x[index], self.y[index]

    def __len__(self):
        return len(self.x)

# Load the tokenizer saved alongside the fine-tuned checkpoint.
tokenizer = BertTokenizer.from_pretrained(model_path, do_lower_case=True)

t = time.time()
# Token-classification head: one logit per BMES tag at every position.
model = BertForTokenClassification.from_pretrained(model_path, num_labels=len(tag2id))
model = model.to(DEVICE)
t1 = time.time()
print(t1-t)  # model load time in seconds
# train_ds = create_Dataset(train)
test_ds = create_Dataset(test)
print(len(test_ds))  # number of evaluation examples after the <512 filter
# dataloader ：padding 
def padding(data):
    """Collate function: zero-pad each batch's sequences to the batch max length.

    Args:
        data: list of (input_ids, label_ids) pairs (variable-length lists).

    Returns:
        Two LongTensors of shape (batch, max_len): padded inputs and labels.
        Pad value is 0 for both.
    """
    seqs_x, seqs_y = zip(*data)

    def _pad(seqs):
        # Right-pad every sequence with zeros up to the longest in the batch.
        width = max(len(s) for s in seqs)
        out = torch.zeros(len(seqs), width, dtype=torch.long)
        for row, seq in enumerate(seqs):
            out[row, :len(seq)] = torch.as_tensor(seq, dtype=torch.long)
        return out

    return _pad(seqs_x), _pad(seqs_y)

    
# No shuffling for evaluation; padding() collates the ragged sequences.
test_dataloader = DataLoader(test_ds, batch_size=10, shuffle=False,collate_fn=padding)

preds, labels = [], []  # flattened predicted / gold tag ids over the whole test set
total_val_loss = 0  # NOTE(review): never updated below -- leftover from a loss-tracking version

# Evaluation loop: collect predicted and gold tag ids over the test set.
# Fixes vs. original: (1) padding positions are now excluded -- padded labels
# are 0, which collides with whichever tag has id 0 and inflated the metrics;
# (2) the inner loop no longer shadows the outer enumerate index;
# (3) the redundant softmax before argmax is dropped (argmax is unchanged).
model.eval()  # ensure dropout is disabled during inference
for step, batch in enumerate(test_dataloader):
    with torch.no_grad():
        input_tensor, label_tensor = batch
        # Token id 0 is the pad value produced by padding(), so >0 marks real tokens.
        keep_mask = input_tensor > 0
        result = model(input_tensor.to(DEVICE), attention_mask=keep_mask.to(DEVICE))

        if step % 200 == 0:
            print('step---%s' % step)

        # argmax over raw logits equals argmax over softmax probabilities.
        pred_tensor = torch.argmax(result.logits.detach().cpu(), dim=2)
        for row in range(pred_tensor.size(0)):
            keep = keep_mask[row]
            preds.extend(pred_tensor[row][keep].tolist())
            labels.extend(label_tensor[row][keep].tolist())

print(time.time()-t1)  # total inference time in seconds
# Macro average: every tag class weighted equally regardless of frequency.
precision = precision_score(labels, preds, average='macro')
recall = recall_score(labels, preds, average='macro')
f1 = f1_score(labels, preds, average='macro')
report = classification_report(labels, preds)
print('test', report)