import numpy as np 
import sklearn 
import warnings
import sklearn.exceptions
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
import torch
from torch.utils.data import Dataset,DataLoader
import torch.nn as nn
import torch.optim as optim
import os
import random
import numpy as np
from tqdm import tqdm
from torch.utils.data import TensorDataset, DataLoader, random_split, Dataset
from transformers import BertTokenizer
from transformers import BertForTokenClassification, AdamW
from transformers import get_linear_schedule_with_warmup
from sklearn.metrics import precision_score, recall_score, f1_score, classification_report
from transformers import WEIGHTS_NAME, CONFIG_NAME
from torchcrf import CRF

# Pin every RNG source so training runs are reproducible.
seed = 42
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True


# Hyper parameters
BATCH_SIZE = 6
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')  # which cuda slot to use depends on server GPU availability
epochs = 1
BEST_F1 = 0
model_output_dir = "deep_learning/bert/save_model/bert_model_%s"

# Pick the first unused run index and create a fresh checkpoint directory.
i = 0
while os.path.exists(model_output_dir % i):
    i += 1
model_output_dir = model_output_dir % i
os.mkdir(model_output_dir)

def load_dict(dict_path):
    """Load a newline-delimited vocabulary file.

    Each line (minus its trailing newline) is taken verbatim as a token, and
    tokens are numbered by line position starting at 0.

    Args:
        dict_path: path to a UTF-8 text file, one token per line.

    Returns:
        (token2id, id2token): the forward mapping and its inverse.
    """
    vocab = {}
    # Context manager closes the handle deterministically; the original left
    # the file open until garbage collection.
    with open(dict_path, 'r', encoding='utf-8') as f:
        for idx, line in enumerate(f):
            vocab[line.strip('\n')] = idx
    return vocab, {v: k for k, v in vocab.items()}

def convert_seg_result_to_bmes(seg_result, tag2id):
    """Convert a segmented sentence (list of words) to a list of BMES tag ids.

    Single-character words become 'S'; longer words become
    'B' + 'M'*(len-2) + 'E'. Empty strings contribute no tags.
    """
    tag_chunks = []
    for word in seg_result:
        n = len(word)
        if n == 1:
            tag_chunks.append('S')
        elif n > 1:
            tag_chunks.append('B' + 'M' * (n - 2) + 'E')
        # n == 0: nothing to emit for an empty term
    return [tag2id[ch] for ch in ''.join(tag_chunks)]

def _read_seg_file(path):
    """Read one whitespace-segmented corpus file: one sentence per line,
    words separated by single spaces. Returns a list of word lists."""
    # `with` replaces the original open/readlines/close triple and closes the
    # handle even if reading raises.
    with open(path, 'r', encoding='utf-8') as f:
        return [line.strip('\n').split(' ') for line in f]

# Gold-segmented train/dev corpora (NLPCC 2016 word segmentation task).
train = _read_seg_file('data/segment_sentence_data/nlpcc2016-word-seg-train.txt')
test = _read_seg_file('data/segment_sentence_data/nlpcc2016-wordseg-dev.txt')

# Token->id and tag->id vocabularies plus their inverses.
word2id, id2word = load_dict('deep_learning/word2id.txt')
tag2id, id2tag = load_dict('deep_learning/tag2id.txt')

# Re-shape each sample into [character list, BMES tag-id list].
train = [[list(''.join(i)), convert_seg_result_to_bmes(i, tag2id)] for i in train]
test = [[list(''.join(i)), convert_seg_result_to_bmes(i, tag2id)] for i in test]


# Extra pre-tokenized corpora serialized with torch.save. Each item is
# presumably [word-id sequence, tag-id sequence] — the merge code below
# indexes i[0]/i[1] that way; verify against the producer script.
another_train = torch.load('deep_learning/train_data')
another_test = torch.load('deep_learning/test_data')


# Inverse of the word->id mapping: recover tokens from an id sequence.
def convert_id_back_words(data, id2word):
    """Map each id in *data* (a list of ids) back to its token via *id2word*."""
    return [id2word[token_id] for token_id in data]

# Merge the extra corpora in, dropping sentences whose tag sequence would
# exceed BERT's 512-position limit.
for extra, target in ((another_train, train), (another_test, test)):
    target.extend(
        [convert_id_back_words(sample[0], id2word), sample[1]]
        for sample in extra
        if len(sample[1]) < 512
    )

print(len(train))
print(len(test))

# create Dataset
class create_Dataset(Dataset):
    """Dataset over (token list, tag-id list) pairs.

    Tokens are converted to BERT vocabulary ids once, up front, using the
    module-level `tokenizer`.
    """

    def __init__(self, data):
        self.x = [tokenizer.convert_tokens_to_ids(sample[0]) for sample in data]
        self.y = [sample[1] for sample in data]

    def __getitem__(self, index):
        return self.x[index], self.y[index]

    def __len__(self):
        return len(self.x)

# Load the pretrained Tokenizer
# (reads the vocab saved under bert_model_0 by a previous run; lowercases input)
tokenizer = BertTokenizer.from_pretrained('deep_learning/bert/save_model/bert_model_0', do_lower_case=True)
train_ds = create_Dataset(train)
test_ds = create_Dataset(test)

# dataloader ：padding 
def _pad_to_max(seqs):
    """Right-pad variable-length int sequences with 0 into one LongTensor of
    shape (batch, max_len)."""
    lengths = [len(s) for s in seqs]
    padded = torch.zeros(len(seqs), max(lengths)).long()
    for row, seq in enumerate(seqs):
        padded[row, :lengths[row]] = torch.LongTensor(seq)
    return padded

def padding(data):
    """Collate function for the dataloaders.

    Args:
        data: list of (input_ids, tag_ids) pairs of varying lengths.

    Returns:
        (x_pad, y_pad): two LongTensors of shape (batch, max_len), zero-padded.
        Pad id 0 doubles as the attention-mask sentinel downstream (mask = x > 0).
    """
    x, y = zip(*data)
    # The original padded x and y with two identical copies of the same loop;
    # both now share one helper.
    return _pad_to_max(x), _pad_to_max(y)

# Shuffle only the training set; `padding` collates each batch to equal length.
train_dataloader = DataLoader(train_ds, batch_size=BATCH_SIZE, shuffle=True,collate_fn=padding)
test_dataloader = DataLoader(test_ds, batch_size=BATCH_SIZE, shuffle=False,collate_fn=padding)

# Token-classification head on top of Chinese BERT; one logit per BMES tag.
model = BertForTokenClassification.from_pretrained('bert-base-chinese', num_labels=len(tag2id))
# model = BertForTokenClassification.from_pretrained('deep_learning/bert/save_model/bert_model_0', num_labels=len(tag2id))
# model = BERT_CRF(BertForTokenClassification, tag2id)
model = model.to(DEVICE)
# create optimizer and learning rate schedule
# NOTE(review): transformers.AdamW is deprecated in recent versions —
# consider torch.optim.AdamW when upgrading.
optimizer = AdamW(model.parameters(), lr=2e-5)
total_steps = len(train_dataloader) * epochs
# Linear decay to zero with no warmup steps.
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)

for epoch in range(epochs):
    # ---------------- train ----------------
    model.train()
    total_val_loss = 0
    preds, labels = [], []
    for step, batch in enumerate(train_dataloader):
        model.zero_grad()
        # attention_mask: pad id is 0, so (x > 0) marks the real tokens.
        result = model(batch[0].to(DEVICE), token_type_ids=None, attention_mask=(batch[0]>0).to(DEVICE), labels=batch[1].to(DEVICE))
        # Loss is deliberately scaled by 100 (larger gradients / readable logs).
        loss = result.loss*100
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        scheduler.step()
        if step%100 == 0:
            print('epoch:', epoch, '-----step:',step, '-----loss:',loss.item())
        # Collect per-token predictions for the epoch-level report.
        # NOTE(review): padded positions are included in the metrics; masking
        # them out with (batch[0] > 0) would give truer scores — confirm
        # before changing, as it alters the reported numbers.
        logits = torch.nn.functional.softmax(result.logits.detach().cpu(), dim=2)
        label_ids = batch[1].to('cpu').numpy()
        for row in range(len(logits)):
            preds.extend(torch.argmax(logits[row], dim=1).tolist())
        for row in range(len(label_ids)):
            labels.extend(label_ids[row])
    print('train', classification_report(labels, preds))

    # ---------------- evaluate ----------------
    model.eval()
    preds, labels = [], []
    for batch in test_dataloader:
        with torch.no_grad():
            result = model(batch[0].to(DEVICE), token_type_ids=None, attention_mask=(batch[0]>0).to(DEVICE), labels=batch[1].to(DEVICE))

            total_val_loss += result.loss.item()

            logits = torch.nn.functional.softmax(result.logits.detach().cpu(), dim=2)
            label_ids = batch[1].to('cpu').numpy()
            # Renamed from `i`, which shadowed the outer batch index in the
            # original and made the eval loop fragile to edit.
            for row in range(len(logits)):
                preds.extend(torch.argmax(logits[row], dim=1).tolist())
            for row in range(len(label_ids)):
                labels.extend(label_ids[row])
    f1 = f1_score(labels, preds, average='macro')
    print('test', classification_report(labels, preds))

    # Save a checkpoint only when validation macro-F1 improves.
    if f1 > BEST_F1:
        # BUG FIX: BEST_F1 was never updated, so the guard was ineffective and
        # every epoch with f1 > 0 overwrote the checkpoint.
        BEST_F1 = f1
        model_to_save = model.module if hasattr(model, 'module') else model
        # Save under the standard names so `from_pretrained` can reload it.
        output_model_file = os.path.join(model_output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(model_output_dir, CONFIG_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(model_output_dir)