import os
import json
from model import pt_ernie_classifier, pt_bert_classifier, pt_bert_attention_classifier
from transformers import BertTokenizer, BertModel
from transformers import AdamW, get_linear_schedule_with_warmup
from dataloader import create_Dataset, pytorch_padding, create_Dataset_type, pytorch_padding_type
from torch.utils.data import DataLoader
from sklearn.metrics import classification_report, accuracy_score
import torch
import logging


# ---- Run configuration -------------------------------------------------
# Which CUDA device to use depends on current GPU availability on the server.
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
BEST_ACC = 0
train_file = "./data/processed_data0927/train"
# another_train_file = "./data/processed_data0926/rhythm_train"
test_file = "./data/processed_data0927/test"
# another_test_file = "./data/processed_data0926/rhythm_test"
MODEL_NAME = '../ernie_para_bert'
model_hidden_size = 312  # medium 768 | mini 384 | micro 384 | nano 312
BATCH_SIZE = 64
EPOCH = 10
LR = 2e-5
tokenizer = BertTokenizer.from_pretrained(MODEL_NAME, do_lower_case = True)

# Checkpoint-resume settings; the paths below are only read when
# continue_train is True.
continue_train = False
if continue_train:
    model_dir = 'save_model/0707_ernie3_nano_model_2'
    model_name = 'epoch5_acc981_model.pdparams'
    optim_name = 'epoch5_optimizer.pdopt'

# Model save path: probe save_model/..._0, ..._1, ... and take the first
# run index that does not exist yet.
model_output_dir = "save_model/0926_pt_ernie3_nano_multi_%s"
run_idx = 0
while os.path.exists(model_output_dir % run_idx):
    run_idx += 1
model_output_dir = model_output_dir % run_idx
# model_output_dir = "save_model/test_round"
os.makedirs(model_output_dir, exist_ok=True)



# Load the label vocabularies: pinyin (polyphone pronunciation) ids and the
# multi-task tag ids, plus their inverse mappings for decoding predictions.
pinyin_file = os.path.join('data/processed_data0926', 'pinyin2id_0927.json')
multitasktag_file = os.path.join('data', 'tag2id.json')
# Use context managers so the files are closed even if json.load raises
# (the original open/close pairs leaked the handle on error).
with open(pinyin_file, 'r', encoding='utf-8') as f:
    pinyin2id = json.load(f)
id2pinyin = {v: k for k, v in pinyin2id.items()}
with open(multitasktag_file, 'r', encoding='utf-8') as f1:
    tag2id = json.load(f1)
id2tag = {v: k for k, v in tag2id.items()}

# Configure run logging (one log file per run directory) and record the
# hyper-parameters so each run is reproducible from its log alone.
logging.basicConfig(filename=os.path.join(model_output_dir, 'logger.log'), level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
# Lazy %-style args: formatting is deferred until the record is emitted.
logging.info('model name: %s', MODEL_NAME)
logging.info('batch size: %s', BATCH_SIZE)
logging.info('epoch: %s', EPOCH)
logging.info('lr: %s', LR)
logging.info('train file: %s, test file: %s, pinyin file: %s, other tag file: %s', train_file, test_file, pinyin_file, multitasktag_file)
# logging.info('another train file: %s, another test file: %s', another_train_file, another_test_file)
if continue_train:
    logging.info('continue model path: %s', model_dir)
    logging.info('continue model: %s', model_name)
# (typo fix: "descibe" -> "describe")
logging.info('describe: all multi with ernie nano(with BertModel) and pytorch, rhythm loss weight 0.1:1')
logging.info('total loss weight 1:2, Punctuation change to "_", train with polyphone and rhythm data, use clean(new) rhythm data and polyphone data')
logging.info('多音字多标会输入噪声，尝试权重为2:0，韵律权重1：1')



# ---- Data, model, optimizer, losses ------------------------------------
train = torch.load(train_file)
# train += torch.load(another_train_file)
test = torch.load(test_file)
# test += torch.load(another_test_file)
type2id = {"poly":0, "rhythm":1}

train_ds = create_Dataset_type(train, tokenizer, pinyin2id, tag2id, type2id)
test_ds = create_Dataset_type(test, tokenizer, pinyin2id, tag2id, type2id)
# train_ds = create_Dataset(train, tokenizer, pinyin2id, tag2id)
# test_ds = create_Dataset(test, tokenizer, pinyin2id, tag2id)

train_dataloader = DataLoader(train_ds, batch_size=BATCH_SIZE, shuffle=True, collate_fn=pytorch_padding_type)
test_dataloader = DataLoader(test_ds, batch_size=BATCH_SIZE, shuffle=True, collate_fn=pytorch_padding_type)

# model = pt_ernie_classifier(len(type2id), len(pinyin2id), 2, MODEL_NAME, model_hidden_size)
model = pt_bert_classifier(len(tag2id), len(pinyin2id), 2, MODEL_NAME, model_hidden_size)

# Create optimizer and learning-rate schedule.
# BUGFIX: scheduler.step() is called once per *batch*, so the schedule length
# is the number of optimizer steps (batches * epochs), not the number of
# samples. The old `len(train) * EPOCH` overestimated by ~BATCH_SIZE, making
# the linear decay almost flat over the real run.
optimizer = AdamW(model.parameters(), lr=LR)
total_steps = len(train_dataloader) * EPOCH
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)

criterion_seg = torch.nn.CrossEntropyLoss()
criterion_poly = torch.nn.CrossEntropyLoss()
# BUGFIX: class weights must live on the same device as the logits —
# CrossEntropyLoss raises a device-mismatch error on GPU otherwise.
# Weight 0.1 down-weights the majority class for each rhythm level.
criterion_r1 = torch.nn.CrossEntropyLoss(weight = torch.Tensor([0.1, 1]).to(DEVICE))
criterion_r2 = torch.nn.CrossEntropyLoss(weight = torch.Tensor([0.1, 1]).to(DEVICE))
criterion_r3 = torch.nn.CrossEntropyLoss(weight = torch.Tensor([0.1, 1]).to(DEVICE))


# Optionally resume from a previously saved checkpoint, then move the
# model onto the training device.
if continue_train:
    checkpoint_path = os.path.join(model_dir, model_name)
    model.load_state_dict(torch.load(checkpoint_path))
model = model.to(DEVICE)

# ---- Training / evaluation loop ----------------------------------------
global_step = 0
for epoch in range(1, EPOCH + 1):
    # BUGFIX: model.eval() at the end of each epoch used to leave the model
    # in eval mode (dropout disabled) for every epoch after the first;
    # switch back to train mode at the start of every epoch.
    model.train()
    for step, batch in enumerate(train_dataloader, start=1):
        model.zero_grad()

        # Positions of samples by source dataset. batch[6] is the sample
        # type (0 = polyphone set, nonzero = rhythm set). NOTE: "seg" here
        # actually refers to the rhythm dataset — historical misnomer.
        seg_pos = torch.where(batch[6])[0]
        poly_pos = torch.where(batch[6]==0)[0]

        batch_ids, poly_ids, poly_labels = batch[1]
        poly_result, seg_result, rhythm1_result, rhythm2_result, rhythm3_result = model(batch[0].to(DEVICE), attention_mask = (batch[0]>0).to(DEVICE), batch_ids = batch_ids.to(DEVICE), poly_ids = poly_ids.to(DEVICE))
        seg_labels = batch[2]
        rhythm1_labels = batch[3]
        rhythm2_labels = batch[4]
        rhythm3_labels = batch[5]

        # Split every head's predictions/labels by whether the originating
        # sample came from the polyphone dataset or the rhythm dataset, so
        # each source can get a different loss weight below.
        seg_poly_pos = [idx for idx, num in enumerate(batch_ids) if num in seg_pos]
        poly_poly_pos = [idx for idx, num in enumerate(batch_ids) if num in poly_pos]
        seg_poly_result = poly_result[seg_poly_pos]
        seg_poly_labels = poly_labels[seg_poly_pos]
        poly_poly_result = poly_result[poly_poly_pos]
        poly_poly_labels = poly_labels[poly_poly_pos]
        seg_seg_result = seg_result[seg_pos]
        seg_seg_labels = seg_labels[seg_pos]
        poly_seg_result = seg_result[poly_pos]
        poly_seg_labels = seg_labels[poly_pos]
        seg_rhythm1_result = rhythm1_result[seg_pos]
        seg_rhythm1_labels = rhythm1_labels[seg_pos]
        poly_rhythm1_result = rhythm1_result[poly_pos]
        poly_rhythm1_labels = rhythm1_labels[poly_pos]
        seg_rhythm2_result = rhythm2_result[seg_pos]
        seg_rhythm2_labels = rhythm2_labels[seg_pos]
        poly_rhythm2_result = rhythm2_result[poly_pos]
        poly_rhythm2_labels = rhythm2_labels[poly_pos]
        seg_rhythm3_result = rhythm3_result[seg_pos]
        seg_rhythm3_labels = rhythm3_labels[seg_pos]
        poly_rhythm3_result = rhythm3_result[poly_pos]
        poly_rhythm3_labels = rhythm3_labels[poly_pos]

        # Per-source loss weighting — "hard" (native-task) data vs "soft".
        # NOTE(review): criterion_r1/r2/r3 (the 0.1:1 weighted rhythm
        # criteria) are constructed above but never used — the rhythm losses
        # below use the unweighted criterion_poly, which contradicts the
        # logged "rhythm loss weight 0.1:1". Confirm which was intended.
        seg_loss = criterion_seg(seg_seg_result.reshape([-1, len(tag2id)]), seg_seg_labels.reshape(-1).to(DEVICE)) + criterion_seg(poly_seg_result.reshape([-1, len(tag2id)]), poly_seg_labels.reshape(-1).to(DEVICE))
        poly_loss = 0 * criterion_poly(seg_poly_result.reshape([-1, len(pinyin2id)]), seg_poly_labels.reshape(-1).to(DEVICE)) + 2 * criterion_poly(poly_poly_result.reshape([-1, len(pinyin2id)]), poly_poly_labels.reshape(-1).to(DEVICE))
        rhythm1_loss = 1 * criterion_poly(seg_rhythm1_result.reshape([-1, 2]), seg_rhythm1_labels.reshape(-1).to(DEVICE)) + criterion_poly(poly_rhythm1_result.reshape([-1, 2]), poly_rhythm1_labels.reshape(-1).to(DEVICE))
        rhythm2_loss = 1 * criterion_poly(seg_rhythm2_result.reshape([-1, 2]), seg_rhythm2_labels.reshape(-1).to(DEVICE)) + criterion_poly(poly_rhythm2_result.reshape([-1, 2]), poly_rhythm2_labels.reshape(-1).to(DEVICE))
        rhythm3_loss = 1 * criterion_poly(seg_rhythm3_result.reshape([-1, 2]), seg_rhythm3_labels.reshape(-1).to(DEVICE)) + criterion_poly(poly_rhythm3_result.reshape([-1, 2]), poly_rhythm3_labels.reshape(-1).to(DEVICE))

        # Total multi-task loss.
        loss = seg_loss + poly_loss + rhythm1_loss + rhythm2_loss + rhythm3_loss
        if global_step % 1000 == 0:
            logging.info("global step %d, epoch: %d, batch: %d, loss: %.5f", global_step, epoch, step, loss.item())

        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        scheduler.step()
        global_step += 1

    # ---- Per-epoch evaluation on the test set ---------------------------
    model.eval()
    result_dict = {task: {"labels": [], "preds": []} for task in ['seg', 'poly', 'rhythm1', 'rhythm2', 'rhythm3']}
    # BUGFIX: disable autograd during evaluation — the original built
    # gradient graphs for every test batch, wasting time and memory.
    with torch.no_grad():
        for step, batch in enumerate(test_dataloader):
            batch_ids, poly_ids, poly_label = batch[1]
            poly_result, seg_result, rhythm1_result, rhythm2_result, rhythm3_result = model(batch[0].to(DEVICE), attention_mask = (batch[0]>0).to(DEVICE), batch_ids = batch_ids.to(DEVICE), poly_ids = poly_ids.to(DEVICE))
            seg_label = batch[2].reshape([-1]).tolist()
            rhythm1_label = batch[3].reshape([-1]).tolist()
            rhythm2_label = batch[4].reshape([-1]).tolist()
            rhythm3_label = batch[5].reshape([-1]).tolist()

            # Accumulate argmax predictions and gold labels per task.
            result_dict['poly']['preds'] += torch.argmax(poly_result, dim=-1).tolist()
            result_dict['seg']['preds'] += torch.argmax(seg_result, dim=-1).reshape([-1]).tolist()
            result_dict['rhythm1']['preds'] += torch.argmax(rhythm1_result, dim=-1).reshape([-1]).tolist()
            result_dict['rhythm2']['preds'] += torch.argmax(rhythm2_result, dim=-1).reshape([-1]).tolist()
            result_dict['rhythm3']['preds'] += torch.argmax(rhythm3_result, dim=-1).reshape([-1]).tolist()

            result_dict['poly']['labels'] += poly_label.tolist()
            result_dict['seg']['labels'] += seg_label
            result_dict['rhythm1']['labels'] += rhythm1_label
            result_dict['rhythm2']['labels'] += rhythm2_label
            result_dict['rhythm3']['labels'] += rhythm3_label

    # Average accuracy over the 4 non-seg tasks ('seg' is excluded from the
    # model-selection metric, hence the division by 4).
    total_acc = 0
    for task in result_dict.keys():
        labels = result_dict[task]["labels"]
        preds = result_dict[task]["preds"]
        acc = accuracy_score(labels, preds)
        report = classification_report(labels, preds)
        if task != 'seg':
            total_acc += acc
        print(task)
        print(report)
        print(acc)
        logging.info('%s acc: %s', task, acc)
    total_acc /= 4
    logging.info('avg acc: %s', total_acc)
    if total_acc > BEST_ACC:
        logging.info('Better acc! Save model and dict!')
        # Persist model/optimizer checkpoints plus the vocabularies needed
        # to reload this run for inference.
        torch.save(model.state_dict(), os.path.join(model_output_dir, "epoch%s_acc%.0f_model.pth"%(epoch, total_acc*1000)))
        tokenizer.save_pretrained(model_output_dir)
        torch.save(optimizer.state_dict(), os.path.join(model_output_dir, "epoch%s_optimizer.pth"%epoch))
        with open(os.path.join(model_output_dir, 'pinyin2id.json'), 'w', encoding='utf-8') as json_write:
            json.dump(pinyin2id, json_write, ensure_ascii=False)
        with open(os.path.join(model_output_dir, 'tag2id.json'), 'w', encoding='utf-8') as json_write1:
            json.dump(tag2id, json_write1, ensure_ascii=False)

        BEST_ACC = total_acc