import os
import json
import paddle 
from paddle.io import Dataset
from model import ernie_classifier
import paddlenlp as ppnlp
import paddle.nn.functional as F
from dataloader import create_Dataset, paddle_padding, create_Dataset_single, paddle_seg_padding
from torch.utils.data import DataLoader
from sklearn.metrics import classification_report, accuracy_score
import torch
from tqdm import tqdm


# --- Evaluation configuration -------------------------------------------
BEST_ACC = 0  # NOTE(review): never read or updated in this script -- presumably leftover from a training loop
test_file = "./data/eval_diff_task/seg/nlpcc2016_test"  # serialized test split, loaded with torch.load below
MODEL_NAME = "ernie-3.0-nano-zh"  # pretrained ERNIE backbone identifier (PaddleNLP hub name)
model_hidden_size = 312 # medium 768 mini 384 micro 384 nano 312 -- must match MODEL_NAME's hidden size
BATCH_SIZE = 8
model_dir = 'save_model/0914_ernie3_nano_multi_2'  # directory holding the fine-tuned checkpoint
model_name = 'epoch6_acc989_model.pdparams'  # checkpoint file restored into the model below

# Tokenizer matching the pretrained backbone.
tokenizer = ppnlp.transformers.ErnieTokenizer.from_pretrained(MODEL_NAME)



# Load vocabulary mappings (pinyin<->id and multitask tag<->id) from JSON.
# Using `with` so the file handles are closed even if json.load raises
# (the original opened/closed manually, leaking the handle on error).
pinyin_file = os.path.join('data/processed_data0831/paddle_inference_seg', 'pinyin2id.json')
multitasktag_file = os.path.join('data/processed_data0831/paddle_inference_seg', 'tag2id.json')

with open(pinyin_file, 'r', encoding='utf-8') as f:
    pinyin2id = json.load(f)
id2pinyin = {v: k for k, v in pinyin2id.items()}  # inverse map: id -> pinyin

with open(multitasktag_file, 'r', encoding='utf-8') as f1:
    type2id = json.load(f1)
id2type = {v: k for k, v in type2id.items()}  # inverse map: id -> tag name



# Load the preprocessed test split (serialized with torch.save).
test = torch.load(test_file)

# Build the segmentation-task dataset wrapper around the raw test data.
test_ds = create_Dataset_single(test, tokenizer, 'seg', type2id)

# shuffle=False: shuffling buys nothing for evaluation (accuracy is computed
# over the full set) and made runs nondeterministic; keep a fixed order.
test_dataloader = DataLoader(test_ds, batch_size=BATCH_SIZE, shuffle=False,
                             collate_fn=paddle_seg_padding)

# Multi-task ERNIE classifier; restore the fine-tuned weights.
model = ernie_classifier(len(type2id), len(pinyin2id), 2, MODEL_NAME, model_hidden_size)
model.set_state_dict(paddle.load(os.path.join(model_dir, model_name)))


# Run inference over the test set and collect per-task predictions.
# Only 'seg' labels are gathered in this run, so only 'seg' gets scored
# later; the other tasks' label lists stay empty.
model.eval()
result_dict = {i: {"labels": [], "preds": []} for i in ['seg', 'poly', 'rhythm1', 'rhythm2', 'rhythm3']}
with paddle.no_grad():  # pure inference: skip autograd bookkeeping
    for batch in tqdm(test_dataloader, total=len(test_dataloader)):
        # Dummy polyphone inputs -- the polyphone task is not evaluated here.
        # NOTE(review): these are length-2 tensors while BATCH_SIZE is 8;
        # presumably the model only consumes them for the polyphone head --
        # confirm against the model implementation.
        batch_ids = paddle.to_tensor([0, 0], dtype='int64')
        poly_ids = paddle.to_tensor([0, 1], dtype='int64')
        seg_label = batch[1].reshape([-1]).tolist()
        poly_result, seg_result, rhythm1_result, rhythm2_result, rhythm3_result = model(
            batch[0], attention_mask=(batch[0] > 0), batch_ids=batch_ids, poly_ids=poly_ids)

        # Accumulate flattened argmax predictions per task.
        result_dict['poly']['preds'] += paddle.argmax(poly_result, axis=-1).tolist()
        result_dict['seg']['preds'] += paddle.argmax(seg_result, axis=-1).reshape([-1]).tolist()
        result_dict['rhythm1']['preds'] += paddle.argmax(rhythm1_result, axis=-1).reshape([-1]).tolist()
        result_dict['rhythm2']['preds'] += paddle.argmax(rhythm2_result, axis=-1).reshape([-1]).tolist()
        result_dict['rhythm3']['preds'] += paddle.argmax(rhythm3_result, axis=-1).reshape([-1]).tolist()

        # Only the segmentation task has gold labels in this evaluation.
        result_dict['seg']['labels'] += seg_label

# Score every task that actually collected labels (only 'seg' in this run)
# and print a per-task classification report plus accuracy.
total_acc = 0
scored_tasks = 0
for task in result_dict:
    labels = result_dict[task]["labels"]
    if not labels:
        continue  # task was not evaluated -- no labels gathered
    preds = result_dict[task]["preds"]
    acc = accuracy_score(labels, preds)
    total_acc += acc
    scored_tasks += 1
    print(task)
    print(classification_report(labels, preds))
    print(acc)

# total_acc was previously accumulated but never reported -- surface it.
if scored_tasks:
    print(f"average acc over {scored_tasks} task(s): {total_acc / scored_tasks}")