import os
import json
import paddle 
from model import ernie_classifier
import paddlenlp as ppnlp
import paddle.nn.functional as F
from dataloader import create_Dataset, paddle_padding
from torch.utils.data import DataLoader
from sklearn.metrics import classification_report, accuracy_score
import torch
import logging
from tqdm import tqdm


BEST_ACC = 0

# Data locations: two preprocessing rounds that get merged for each split.
train_file = "./data/processed_data0907/train"
another_train_file = "./data/processed_data0906/train"
test_file = "./data/processed_data0907/test"
another_test_file = "./data/processed_data0906/test"

# Backbone and training hyper-parameters.
MODEL_NAME = "ernie-3.0-nano-zh"
model_hidden_size = 312  # medium 768, mini 384, micro 384, nano 312
BATCH_SIZE = 32
EPOCH = 10
LR = 2e-5
tokenizer = ppnlp.transformers.ErnieTokenizer.from_pretrained(MODEL_NAME)

# Resume from a previous checkpoint when enabled.
continue_train = True
if continue_train:
    model_dir = 'save_model/0914_ernie3_nano_multi_0'
    model_name = 'epoch5_acc988_model.pdparams'
    optim_name = 'epoch5_optimizer.pdopt'

# Pick the first unused run directory save_model/0914_ernie3_nano_multi_<n>.
model_output_dir = "save_model/0914_ernie3_nano_multi_%s"
run_idx = 0
while os.path.exists(model_output_dir % run_idx):
    run_idx += 1
model_output_dir = model_output_dir % run_idx
# model_output_dir = "save_model/test_round"
os.makedirs(model_output_dir, exist_ok=True)



# id <-> token maps produced by an earlier preprocessing run.
# pinyin2id: pinyin string -> class id (polyphone task output space)
# type2id:   segmentation/other tag -> class id
pinyin_file = os.path.join('data/processed_data0831/paddle_inference_seg', 'pinyin2id.json')
multitasktag_file = os.path.join('data/processed_data0831/paddle_inference_seg', 'tag2id.json')
# Use context managers so the files are closed even if json.load raises
# (the original open/close pair leaked the handle on error).
with open(pinyin_file, 'r', encoding='utf-8') as f:
    pinyin2id = json.load(f)
id2pinyin = {v: k for k, v in pinyin2id.items()}
with open(multitasktag_file, 'r', encoding='utf-8') as f1:
    type2id = json.load(f1)
id2type = {v: k for k, v in type2id.items()}

# Write the run configuration to <run dir>/logger.log.
logging.basicConfig(filename=os.path.join(model_output_dir, 'logger.log'),
                    level=logging.INFO,
                    format="%(asctime)s - %(levelname)s - %(message)s")
# Lazy %-args: the message is only formatted if the record is emitted.
logging.info('model name: %s', MODEL_NAME)
logging.info('batch size: %s', BATCH_SIZE)
logging.info('epoch: %s', EPOCH)
logging.info('lr: %s', LR)
logging.info('train file: %s, test file: %s, pinyin file: %s, other tag file: %s',
             train_file, test_file, pinyin_file, multitasktag_file)
logging.info('another train file: %s, another test file: %s',
             another_train_file, another_test_file)
if continue_train:
    logging.info('continue model path: %s', model_dir)
    logging.info('continue model: %s', model_name)
# "descibe" typo in the original message fixed.
logging.info('describe: all multi with ernie nano and paddle, rhythm loss weight 0.1:1, '
             'total loss weight equals, Punctuation change to "_", '
             'train with polyphone and rhythm data')


# Load the preprocessed samples (presumably lists saved with torch.save —
# TODO confirm against the preprocessing script) and merge the two
# processing rounds for each split.
train = torch.load(train_file) + torch.load(another_train_file)
test = torch.load(test_file) + torch.load(another_test_file)

train_ds = create_Dataset(train, tokenizer, pinyin2id, type2id)
test_ds = create_Dataset(test, tokenizer, pinyin2id, type2id)

train_dataloader = DataLoader(train_ds, batch_size=BATCH_SIZE,
                              shuffle=True, collate_fn=paddle_padding)
test_dataloader = DataLoader(test_ds, batch_size=BATCH_SIZE,
                             shuffle=True, collate_fn=paddle_padding)

# Multi-task ERNIE classifier: segmentation tags, polyphone (pinyin classes)
# and three binary rhythm-break levels.
model = ernie_classifier(len(type2id), len(pinyin2id), 2, MODEL_NAME, model_hidden_size)
optimizer = paddle.optimizer.AdamW(learning_rate=LR, parameters=model.parameters())

# Unweighted losses for segmentation and polyphone disambiguation.
criterion_seg = paddle.nn.loss.CrossEntropyLoss()
criterion_poly = paddle.nn.loss.CrossEntropyLoss()
# Rhythm losses weight the two classes 0.1 : 1.
_rhythm_weights = [0.1, 1]
criterion_r1 = paddle.nn.loss.CrossEntropyLoss(weight=paddle.to_tensor(_rhythm_weights))
criterion_r2 = paddle.nn.loss.CrossEntropyLoss(weight=paddle.to_tensor(_rhythm_weights))
criterion_r3 = paddle.nn.loss.CrossEntropyLoss(weight=paddle.to_tensor(_rhythm_weights))

# Running training-accuracy trackers: poly, seg, rhythm1, rhythm2, rhythm3.
metric1, metric2, metric3, metric4, metric5 = (
    paddle.metric.Accuracy() for _ in range(5))

# Optionally resume model weights and optimizer state from a checkpoint.
if continue_train:
    model.set_state_dict(paddle.load(os.path.join(model_dir, model_name)))
    optimizer.set_state_dict(paddle.load(os.path.join(model_dir, optim_name)))

# ---------------------------------------------------------------------------
# Training / evaluation loop.
# Per epoch: one pass over the training set with a joint multi-task loss
# (segmentation + polyphone + three rhythm levels), then a full evaluation on
# the test set; the checkpoint is saved whenever the mean accuracy over the
# five tasks improves.
# ---------------------------------------------------------------------------
global_step = 0
for epoch in range(1, EPOCH + 1):
    # BUG FIX: model.eval() at the end of the previous epoch's evaluation was
    # never undone, so epochs 2+ silently trained in eval mode (dropout off).
    model.train()
    # BUG FIX: reset the running-accuracy metrics so the logged training
    # accuracy covers the current epoch only, instead of accumulating stale
    # statistics from earlier (worse) weights.
    for m in (metric1, metric2, metric3, metric4, metric5):
        m.reset()

    for step, batch in enumerate(train_dataloader, start=1):
        # batch[0]: padded token ids; batch[1]: (batch_ids, poly_ids, labels)
        # for the polyphone task; batch[2]: segmentation labels;
        # batch[3..5]: rhythm level 1-3 labels.  (Exact shapes come from
        # dataloader.paddle_padding — not visible here; confirm there.)
        batch_ids, poly_ids, poly_labels = batch[1]
        poly_result, seg_result, rhythm1_result, rhythm2_result, rhythm3_result = model(
            batch[0], attention_mask=(batch[0] > 0), batch_ids=batch_ids, poly_ids=poly_ids)
        seg_labels = batch[2]
        rhythm1_labels = batch[3]
        rhythm2_labels = batch[4]
        rhythm3_labels = batch[5]
        seg_loss = criterion_seg(seg_result, seg_labels)
        poly_loss = criterion_poly(poly_result, poly_labels)
        # BUG FIX: the rhythm losses previously reused criterion_poly, so the
        # weighted criteria criterion_r1/r2/r3 were dead code and the 0.1:1
        # class weighting announced in the run description was never applied.
        rhythm1_loss = criterion_r1(rhythm1_result, rhythm1_labels)
        rhythm2_loss = criterion_r2(rhythm2_result, rhythm2_labels)
        rhythm3_loss = criterion_r3(rhythm3_result, rhythm3_labels)

        # Running training accuracy per task.
        poly_probs = F.softmax(poly_result, axis=-1)
        seg_probs = F.softmax(seg_result, axis=-1)
        rhythm1_probs = F.softmax(rhythm1_result, axis=-1)
        rhythm2_probs = F.softmax(rhythm2_result, axis=-1)
        rhythm3_probs = F.softmax(rhythm3_result, axis=-1)
        # Polyphone accuracy.
        correct1 = metric1.compute(poly_probs, poly_labels)
        metric1.update(correct1)
        acc1 = metric1.accumulate()
        # Segmentation accuracy (flatten tokens across the batch).
        correct2 = metric2.compute(seg_probs.reshape([-1, len(type2id)]), seg_labels.reshape([-1]))
        metric2.update(correct2)
        acc2 = metric2.accumulate()
        # Rhythm accuracies, levels 1-3 (binary).
        correct3 = metric3.compute(rhythm1_probs.reshape([-1, 2]), rhythm1_labels.reshape([-1]))
        metric3.update(correct3)
        acc3 = metric3.accumulate()
        correct4 = metric4.compute(rhythm2_probs.reshape([-1, 2]), rhythm2_labels.reshape([-1]))
        metric4.update(correct4)
        acc4 = metric4.accumulate()
        correct5 = metric5.compute(rhythm3_probs.reshape([-1, 2]), rhythm3_labels.reshape([-1]))
        metric5.update(correct5)
        acc5 = metric5.accumulate()

        # All five task losses weighted equally (see run description).
        loss = seg_loss + poly_loss + rhythm1_loss + rhythm2_loss + rhythm3_loss
        if global_step % 1000 == 0:
            logging.info("global step %d, epoch: %d, batch: %d, loss: %.5f",
                         global_step, epoch, step, loss)
            logging.info("phonic acc: %.5f, task acc: %.5f, rhythm1 acc: %.5f, "
                         "rhythm2 acc: %.5f, rhythm3 acc: %.5f",
                         acc1, acc2, acc3, acc4, acc5)

        loss.backward()
        optimizer.step()
        optimizer.clear_grad()
        global_step += 1

    # ---------------- evaluation on the test set ----------------
    model.eval()
    result_dict = {task: {"labels": [], "preds": []}
                   for task in ['seg', 'poly', 'rhythm1', 'rhythm2', 'rhythm3']}
    # BUG FIX: evaluation does not need gradients; no_grad avoids building
    # the autograd graph for every test batch.
    with paddle.no_grad():
        for batch in tqdm(test_dataloader, total=len(test_dataloader)):
            batch_ids, poly_ids, poly_label = batch[1]
            poly_result, seg_result, rhythm1_result, rhythm2_result, rhythm3_result = model(
                batch[0], attention_mask=(batch[0] > 0), batch_ids=batch_ids, poly_ids=poly_ids)
            seg_label = batch[2].reshape([-1]).tolist()
            rhythm1_label = batch[3].reshape([-1]).tolist()
            rhythm2_label = batch[4].reshape([-1]).tolist()
            rhythm3_label = batch[5].reshape([-1]).tolist()

            # Collect flat prediction/label lists per task for sklearn metrics.
            result_dict['poly']['preds'] += paddle.argmax(poly_result, axis=1).tolist()
            result_dict['seg']['preds'] += paddle.argmax(seg_result, axis=2).reshape([-1]).tolist()
            result_dict['rhythm1']['preds'] += paddle.argmax(rhythm1_result, axis=2).reshape([-1]).tolist()
            result_dict['rhythm2']['preds'] += paddle.argmax(rhythm2_result, axis=2).reshape([-1]).tolist()
            result_dict['rhythm3']['preds'] += paddle.argmax(rhythm3_result, axis=2).reshape([-1]).tolist()

            result_dict['poly']['labels'] += poly_label.tolist()
            result_dict['seg']['labels'] += seg_label
            result_dict['rhythm1']['labels'] += rhythm1_label
            result_dict['rhythm2']['labels'] += rhythm2_label
            result_dict['rhythm3']['labels'] += rhythm3_label

    # Per-task accuracy plus the unweighted mean over the five tasks.
    total_acc = 0
    for task in result_dict.keys():
        labels = result_dict[task]["labels"]
        preds = result_dict[task]["preds"]
        acc = accuracy_score(labels, preds)
        report = classification_report(labels, preds)
        total_acc += acc
        print(task)
        print(report)
        print(acc)
        logging.info('%s acc: %s', task, acc)
    total_acc /= 5
    logging.info('avg acc: %s', total_acc)

    # Keep the checkpoint (weights + optimizer state + vocab maps) whenever
    # the mean accuracy improves.
    if total_acc > BEST_ACC:
        logging.info('Better acc! Save model and dict!')
        paddle.save(model.state_dict(),
                    os.path.join(model_output_dir,
                                 "epoch%s_acc%.0f_model.pdparams" % (epoch, total_acc * 1000)))
        tokenizer.save_pretrained(model_output_dir)
        paddle.save(optimizer.state_dict(),
                    os.path.join(model_output_dir, "epoch%s_optimizer.pdopt" % epoch))
        with open(os.path.join(model_output_dir, 'pinyin2id.json'), 'w', encoding='utf-8') as json_write:
            json.dump(pinyin2id, json_write, ensure_ascii=False)
        with open(os.path.join(model_output_dir, 'tag2id.json'), 'w', encoding='utf-8') as json_write1:
            json.dump(type2id, json_write1, ensure_ascii=False)

        BEST_ACC = total_acc