import os
import json
from model import pt_ernie_classifier, pt_bert_classifier, pt_bert_attention_classifier
from transformers import BertTokenizer
from transformers import AdamW, get_linear_schedule_with_warmup
from dataloader import create_Dataset, pytorch_padding, create_Dataset_single, pytorch_polyphone_padding, pytorch_rhythm_padding, pytorch_seg_padding
from torch.utils.data import DataLoader
from sklearn.metrics import classification_report, accuracy_score
import torch
from tqdm import tqdm


# Which single evaluation task to run: 'seg' | 'rhythm' | 'polyphone'.
test_class = 'polyphone'
# Per-task paths to the serialized test splits (loaded with torch.load below).
test_file = {'seg':"./data/eval_diff_task/seg/nlpcc2016_test", 'rhythm':"./data/eval_diff_task/rhythm/test_", 'polyphone':"./data/eval_diff_task/polyphone/dev_"}
# Per-task collate functions for the DataLoader.
padding_fc = {'seg': pytorch_seg_padding, 'polyphone':pytorch_polyphone_padding, 'rhythm':pytorch_rhythm_padding}


DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') # which cuda index to use depends on current GPU availability on the server
BEST_ACC = 0
# MODEL_NAME = 'nghuyong/ernie-3.0-nano-zh'
MODEL_NAME = '../ernie_para_bert'
model_hidden_size = 312 # medium 768 mini 384 micro 384 nano 312
BATCH_SIZE = 32

# Directory and filename of the trained checkpoint to evaluate.
model_dir = 'save_model/0926_pt_ernie3_nano_multi_0'
model_name = 'epoch8_acc990_model.pth'
tokenizer = BertTokenizer.from_pretrained(MODEL_NAME, do_lower_case = True)



# Load the pinyin label vocabulary saved alongside the trained model and the
# multi-task tag vocabulary; build the reverse (id -> label) maps used when
# printing human-readable error analysis.
pinyin_file = os.path.join(model_dir, 'pinyin2id.json')
multitasktag_file = os.path.join('data', 'tag2id.json')
# Context managers guarantee the files are closed even if json.load raises.
with open(pinyin_file, 'r', encoding='utf-8') as f:
    pinyin2id = json.load(f)
id2pinyin = {v: k for k, v in pinyin2id.items()}
with open(multitasktag_file, 'r', encoding='utf-8') as f1:
    type2id = json.load(f1)
id2type = {v: k for k, v in type2id.items()}

# The test split was serialized with torch.save; load it and build the
# single-task dataset for the selected evaluation task.
test = torch.load(test_file[test_class])

# 'seg' is labeled with the multi-task tag vocabulary; the other tasks use
# the pinyin vocabulary.
test_ds = create_Dataset_single(test, tokenizer, test_class, type2id if test_class == 'seg' else pinyin2id)

test_dataloader = DataLoader(test_ds, batch_size=BATCH_SIZE, shuffle=False, collate_fn=padding_fc[test_class])

model = pt_bert_classifier(len(type2id), len(pinyin2id), 2, MODEL_NAME, model_hidden_size)
# map_location lets a checkpoint that was saved on GPU load on a CPU-only
# machine (DEVICE already falls back to 'cpu' when CUDA is unavailable).
model.load_state_dict(torch.load(os.path.join(model_dir, model_name), map_location=DEVICE))
model = model.to(DEVICE)

model.eval()
# Per-task accumulators for gold labels and predictions.
result_dict = {i: {"labels": [], "preds": []} for i in ['seg', 'poly', 'rhythm1', 'rhythm2', 'rhythm3']}
# For polyphone error analysis: one [dataset index, char position] per sample.
poly_pos = []
# no_grad: this is pure inference — skip building autograd graphs, which
# otherwise accumulate memory across the whole test set.
with torch.no_grad():
    for step, batch in tqdm(enumerate(test_dataloader), total=len(test_dataloader)):
        if test_class == 'polyphone':
            batch_ids, poly_ids, poly_label = batch[1]
            result_dict['poly']['labels'] += poly_label.tolist()
            # Record where each polyphone sample lives (batch-local index is
            # offset by step*BATCH_SIZE to get the dataset index) for the
            # wrong-label analysis after the loop.
            for x, y in zip(batch_ids, poly_ids):
                poly_pos.append([x.tolist() + step * BATCH_SIZE, y.tolist()])
        else:
            # Dummy ids: the model signature still expects batch_ids/poly_ids
            # for its polyphone head even when evaluating other tasks.
            batch_ids = torch.LongTensor([0, 0])
            poly_ids = torch.LongTensor([0, 1])
            if test_class == 'seg':
                seg_label = batch[1].reshape([-1]).tolist()
                result_dict['seg']['labels'] += seg_label
            elif test_class == 'rhythm':
                rhythm1_label = batch[1].reshape([-1]).tolist()
                rhythm2_label = batch[2].reshape([-1]).tolist()
                rhythm3_label = batch[3].reshape([-1]).tolist()
                result_dict['rhythm1']['labels'] += rhythm1_label
                result_dict['rhythm2']['labels'] += rhythm2_label
                result_dict['rhythm3']['labels'] += rhythm3_label

        # One forward pass produces logits for all five heads; token ids > 0
        # double as the attention mask (0 is the padding id).
        poly_result, seg_result, rhythm1_result, rhythm2_result, rhythm3_result = model(batch[0].to(DEVICE), attention_mask = (batch[0]>0).to(DEVICE), batch_ids = batch_ids.to(DEVICE), poly_ids = poly_ids.to(DEVICE))

        # Collect argmax predictions for every head; heads without gold labels
        # for this test_class simply stay unscored later.
        result_dict['poly']['preds'] += torch.argmax(poly_result, dim=-1).tolist()
        result_dict['seg']['preds'] += torch.argmax(seg_result, dim=-1).reshape([-1]).tolist()
        result_dict['rhythm1']['preds'] += torch.argmax(rhythm1_result, dim=-1).reshape([-1]).tolist()
        result_dict['rhythm2']['preds'] += torch.argmax(rhythm2_result, dim=-1).reshape([-1]).tolist()
        result_dict['rhythm3']['preds'] += torch.argmax(rhythm3_result, dim=-1).reshape([-1]).tolist()



# Score every task that actually collected gold labels for this test_class.
# NOTE(review): total_acc is accumulated but never printed — kept for
# compatibility with callers/notebooks that may inspect it.
total_acc = 0
for task in result_dict.keys():
    labels = result_dict[task]["labels"]
    if labels:
        preds = result_dict[task]["preds"]
        acc = accuracy_score(labels, preds)
        report = classification_report(labels, preds)
        total_acc += acc
        print(task)
        print(report)
        print(acc)
        # Wrong-label analysis only makes sense for the polyphone task:
        # poly_pos and id2pinyin are polyphone-specific. Guarding here also
        # stops the output file being rewritten once per scored task.
        if task == 'poly' and poly_pos:
            wrong_dict = {}
            for label, pred, pos in zip(labels, preds, poly_pos):
                if label != pred:
                    ds_pos, sen_pos = pos
                    data = test[ds_pos]
                    word = data[0][sen_pos]  # the mispredicted character
                    wrong_dict[word] = wrong_dict.get(word, []) + [
                        ''.join(data[0]), sen_pos,
                        'label: ' + id2pinyin[label],
                        'pred: ' + id2pinyin[pred]]

            with open('wrong_tag_analyze.json', 'w', encoding='utf-8') as f:
                json.dump(wrong_dict, f, ensure_ascii=False)