from utils import build_data,build_iterator
from DcBert_sigmoid import DcBert
import torch
import torch.nn.functional as F
import random
import numpy as np
from sklearn import metrics
from tqdm import tqdm
import os
import json



# Run on GPU when available, otherwise fall back to CPU.
DEVICE = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Checkpoint path where train() saves the best (lowest dev-loss) weights.
output_filep = "./output/bert_chat_trans.ckpt"
            
def evaluate(model, data_iter):
    """Evaluate `model` on `data_iter`.

    The model is expected to emit per-example probabilities in [0, 1]
    (shape (batch, 1)) suitable for BCELoss — TODO confirm against DcBert.

    Returns:
        (accuracy, mean batch loss) over the whole iterator.
    """
    model.eval()
    loss_total = 0.0
    all_data_num = 0
    criterion = torch.nn.BCELoss()
    correct = 0
    with torch.no_grad():
        for doc, query, labels in tqdm(data_iter):
            all_data_num += len(query)
            outputs = model(doc, query)
            loss = criterion(outputs.squeeze(), labels.float())
            # .item() accumulates a plain float instead of a tensor,
            # avoiding needless device-memory retention.
            loss_total += loss.item()
            # Threshold probabilities at 0.5. Computed on the outputs'
            # own device — the original hard-coded .cuda(), which broke
            # CPU-only evaluation.
            pred = (outputs.squeeze() >= 0.5).long()
            correct += pred.eq(labels).sum().item()
    return correct / all_data_num, loss_total / len(data_iter)


def train(model, train_iter, dev_iter):
    """Fine-tune `model` with BCE loss for 10 epochs.

    Every 100 batches the model is evaluated on `dev_iter`, and the
    weights are checkpointed to `output_filep` whenever the dev loss
    improves.
    """
    model.train()
    bert_named_parameters = list(model.bert.named_parameters())
    classifier_parameters = list(model.dense.parameters())
    # Standard BERT fine-tuning recipe: no weight decay on biases and
    # LayerNorm parameters.
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in bert_named_parameters if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01, 'lr': 2e-5},
        {'params': [p for n, p in bert_named_parameters if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0, 'lr': 2e-5},
        # The classifier head takes the optimizer-level default lr (0.005).
        {'params': classifier_parameters},
    ]
    optimizer = torch.optim.AdamW(params=optimizer_grouped_parameters, lr=0.005)

    criterion = torch.nn.BCELoss()

    dev_best_loss = float('inf')
    total_batch = 0
    for epoch in range(10):
        print('epoch {} starting..........'.format(epoch))
        for doc, query, labels in tqdm(train_iter):
            out = model(doc, query)
            model.zero_grad()
            loss = criterion(out.squeeze(), labels.float())
            loss.backward()
            optimizer.step()
            if total_batch % 20 == 0:
                # .item() prints a scalar instead of a tensor repr.
                print("train loss {0}".format(loss.item()))
            if total_batch % 100 == 0 and total_batch != 0:
                dev_acc, dev_loss = evaluate(model, dev_iter)
                if dev_loss < dev_best_loss:
                    dev_best_loss = dev_loss
                    torch.save(model.state_dict(), output_filep)
                model.train()  # evaluate() left the model in eval mode
                print("val loss {0}, val_acc {1}".format(dev_loss, dev_acc))
            total_batch += 1
            
def load_ccf_data(train_test='train'):
    """Load CCF query/reply pairs from ./data/<train_test>/.

    Reads `<split>.query.tsv` (query_id, query text) and
    `<split>.reply.tsv` (query_id, reply_id, reply text[, label]).

    Returns:
        A list of [query, reply, label] triples. Unlabelled (3-column,
        test-set) reply rows yield label None — the original called
        int(label) unconditionally, raising TypeError for those rows.
    """
    path = "./data/"
    queries = {}
    with open(os.path.join(path, train_test, train_test + '.query.tsv'), encoding='utf-8') as f:
        for line in f:
            span = line.strip().split('\t')
            queries[span[0]] = {'query': span[1], 'reply': []}

    with open(os.path.join(path, train_test, train_test + '.reply.tsv'), encoding='utf-8') as f:
        for line in f:
            span = line.strip().split('\t')
            if len(span) == 4:
                q_id, r_id, reply, label = span
            else:
                # Test-set rows carry no label column.
                label = None
                q_id, r_id, reply = span
            queries[q_id]['reply'].append([r_id, reply, label])

    data = []
    for v in queries.values():
        for _r_id, reply, label in v['reply']:
            data.append([v['query'], reply, int(label) if label is not None else None])
    return data

def load_chat_data(train_test='train'):
    """Load chatbot training pairs from ./data/chatbot_specific.txt.

    Each JSON line holds a question with one positive and one negative
    response; it yields two samples: [query, pos_response, 1] and
    [query, neg_response, 0].

    `train_test` is kept for interface parity with load_ccf_data but is
    unused — the input path is fixed.
    """
    samples = []
    with open('./data/chatbot_specific.txt', 'r', encoding='utf-8') as fr:
        for line in fr:
            record = json.loads(line.strip())
            query = record["question"]
            samples.append([query, record["pos_response"], 1])
            samples.append([query, record["neg_response"], 0])
    return samples

def load_train_data():
    """Shuffle the chat data and split it 80/20 into train/dev iterators."""
    samples = load_chat_data('train')
    random.shuffle(samples)
    split = int(len(samples) * 0.8)
    # Train batches of 16 (shuffled); dev batches of 32.
    train_iter = build_iterator(samples[:split], 16, DEVICE, True)
    dev_iter = build_iterator(samples[split:], 32, DEVICE)
    print("load data finished")
    return train_iter, dev_iter
    

def _main():
    """Entry point: build the model on DEVICE and run training."""
    model = DcBert()
    model.to(DEVICE)
    train_iter, dev_iter = load_train_data()
    print("training model ........")
    train(model, train_iter, dev_iter)


if __name__ == '__main__':
    _main()
    