import os
import random
import time
import math
import numpy as np
from functools import partial

import paddle
from paddle.io import DataLoader

from paddlenlp.transformers import LinearDecayWithWarmup, BertForTokenClassification, BertTokenizer
from paddlenlp.metrics import ChunkEvaluator
from paddlenlp.datasets import load_dataset
from paddlenlp.data import Stack, Tuple, Pad, Dict

# msra_ner datasets

# Load the MSRA NER dataset (Chinese named-entity recognition).
# Each example is a dict with 'tokens' (list of characters) and
# 'labels' (list of int BIO tag ids) — see the print below.
train_ds, test_ds = load_dataset('msra_ner', splits=['train','test'])
print(train_ds[:20])

# Multilingual uncased BERT checkpoint; its vocab covers Chinese characters.
model_name = 'bert-base-multilingual-uncased'
tokenizer = BertTokenizer.from_pretrained(model_name)

def tokenize_and_align_labels(example, tokenizer, no_entity_id, max_seq_len=512):
    """Tokenize a pre-split example and align its token-level labels.

    Args:
        example (dict): holds 'tokens' (list of token strings) and
            'labels' (list of int label ids, one per token).
        tokenizer: tokenizer callable (e.g. BertTokenizer) invoked with
            ``is_split_into_words=True``.
        no_entity_id (int): label id assigned to [CLS]/[SEP]/padding positions.
        max_seq_len (int): maximum sequence length passed to the tokenizer.

    Returns:
        dict: the tokenizer output with an added 'labels' list whose length
        equals ``len(input_ids)``.
    """
    labels = example['labels']
    tokens = example['tokens']

    tokenized_input = tokenizer(
        tokens,
        return_length=True,
        is_split_into_words=True,
        max_seq_len=max_seq_len
    )

    # If the tokenizer truncated the sequence, truncate the labels too
    # (-2 accounts for the [CLS] and [SEP] special tokens).
    if len(tokenized_input['input_ids']) - 2 < len(labels):
        labels = labels[:len(tokenized_input['input_ids']) - 2]

    tokenized_input['labels'] = [no_entity_id] + labels + [no_entity_id]
    # Pad labels so their length equals input_ids.
    # BUG FIX: the original used '=' here, which *replaced* the aligned
    # labels with padding instead of extending them.
    tokenized_input['labels'] += [no_entity_id] * (
        len(tokenized_input['input_ids']) - len(tokenized_input['labels']))

    return tokenized_input

# Label metadata: the last id in label_list is the "O" (non-entity) tag,
# reused for [CLS]/[SEP]/padding positions.
# BUG FIX: was 'train_ds.label_lis' (AttributeError).
label_list = train_ds.label_list
label_num = len(label_list)
no_entity_id = label_num - 1

# Preprocessing function with everything but the example bound.
trans_func = partial(
    tokenize_and_align_labels,
    tokenizer=tokenizer,
    no_entity_id=no_entity_id,
    max_seq_len=128
)

# Tokenize + align labels for every training example.
train_ds = train_ds.map(trans_func)
# Loss mask value for padded label positions (matches CrossEntropyLoss
# ignore_index below).
ignore_label = -100
# Collate function: pads input_ids / token_type_ids / labels to the batch
# max length and stacks the sequence lengths.
batchify_fn = lambda samples, fn=Dict({
    "input_ids" : Pad(axis=0,pad_val=tokenizer.pad_token_id), # input_ids
    "token_type_ids" : Pad(axis=0,pad_val=tokenizer.pad_token_type_id), # segment
    "seq_len" : Stack(),
    "labels" : Pad(axis=0, pad_val=ignore_label)
}) : fn(samples)

# Distributed-aware sampler so each rank sees a distinct shard of the data.
train_batch_sampler = paddle.io.DistributedBatchSampler(
    dataset=train_ds,
    batch_size=8,
    shuffle=True,
    drop_last=True
)

train_data_loader = DataLoader(
    dataset=train_ds,
    collate_fn=batchify_fn,
    batch_sampler=train_batch_sampler,
    num_workers=0,
    return_list=True
)

# Test split goes through the same preprocessing and collation.
test_ds = test_ds.map(trans_func)
test_data_loader = DataLoader(
    dataset=test_ds,
    collate_fn=batchify_fn,
    num_workers=0,
    batch_size=32,
    return_list=True
)

# Training hyper-parameters. max_steps <= 0 means "derive the total step
# count from the number of epochs".
max_steps = -1
num_train_epochs = 3
learning_rate = 2e-5
warmup_steps = 1000

model = BertForTokenClassification.from_pretrained(model_name, num_classes=label_num)

if paddle.distributed.get_world_size() > 1:
    # BUG FIX: was 'model - paddle.DataParallel(model)' — a no-op
    # subtraction expression instead of an assignment.
    model = paddle.DataParallel(model)

num_trainnings_steps = max_steps if max_steps > 0 else len(train_data_loader) * num_train_epochs

# Linear warmup for `warmup_steps`, then linear decay to 0.
lr_scheduler = LinearDecayWithWarmup(learning_rate, num_trainnings_steps, warmup_steps)

# Names of parameters that should receive weight decay — everything except
# biases and LayerNorm weights (standard BERT fine-tuning practice).
decay_params = [
    p.name for n, p in model.named_parameters()
    if not any(nd in n for nd in ['bias', 'norm'])
]
optimizer = paddle.optimizer.AdamW(
    # BUG FIX: pass the scheduler object, not the constant float. The
    # original passed `learning_rate`, so the warmup/decay schedule that is
    # stepped every iteration in the training loop had no effect.
    learning_rate=lr_scheduler,
    epsilon=1e-8,
    parameters=model.parameters(),
    weight_decay=0.0,  # NOTE(review): 0.0 makes decay_params moot — confirm intended
    apply_decay_param_fun=lambda x: x in decay_params
)

loss_fct = paddle.nn.loss.CrossEntropyLoss(ignore_index=ignore_label)
metric = ChunkEvaluator(label_list=label_list)

def evaluate(model, loss_fct, metric, data_loader, label_num):
    """Evaluate `model` on `data_loader` and print chunk-level P/R/F1.

    Args:
        model: token-classification model returning per-token logits.
        loss_fct: loss callable taking (logits, labels).
        metric: a ChunkEvaluator instance (reset here before accumulating).
        data_loader: yields (input_ids, token_type_ids, seq_len, labels).
        label_num: unused here; kept for signature compatibility with callers.
    """
    model.eval()
    metric.reset()
    avg_loss, precision, recall, f1_score = 0, 0, 0, 0
    for batch in data_loader:
        input_ids, token_type_ids, length, labels = batch
        logits = model(input_ids, token_type_ids)
        loss = loss_fct(logits, labels)
        # BUG FIX: was 'paddle.meam' (typo).
        avg_loss = paddle.mean(loss)
        preds = logits.argmax(axis=2)
        # BUG FIX: ChunkEvaluator.compute takes (lengths, predictions,
        # labels); dropped the stray leading None so this matches the
        # standalone eval loop later in the file.
        num_infer_chunks, num_label_chunks, num_correct_chunks = metric.compute(
            length, preds, labels)
        metric.update(num_infer_chunks.numpy(), num_label_chunks.numpy(),
                      num_correct_chunks.numpy())
        precision, recall, f1_score = metric.accumulate()
    # Print once after the full pass (the original printed every batch).
    # NOTE: avg_loss reflects the last batch only.
    print("eval loss: %f, precision: %f, recall: %f, f: %f" % (
        avg_loss, precision, recall, f1_score))
    # Restore training mode so dropout is re-enabled for the caller.
    model.train()


# ----- Training loop -----
global_step = 0
logging_step = 1
# BUG FIX: a trailing comma made this a 1-tuple ('500,'), which would crash
# on 'global_step % save_steps' below.
save_steps = 500
output_dir = './data'
paddle.set_device('gpu')
if paddle.distributed.get_world_size() > 1:
    paddle.distributed.init_parallel_env()

# BUG FIX: num_trainnings_steps already equals epochs * steps-per-epoch;
# the original multiplied by len(train_data_loader) a second time, so the
# final-step checkpoint/eval never triggered.
last_step = num_trainnings_steps
tic_train = time.time()

# BUG FIX: iterate over epochs — the original looped over the *total step
# count*, training for vastly more epochs than intended.
for epoch in range(num_train_epochs):
    for step, batch in enumerate(train_data_loader):
        global_step += 1
        input_ids, token_type_ids, _, labels = batch
        logits = model(input_ids, token_type_ids)
        loss = loss_fct(logits, labels)
        avg_loss = paddle.mean(loss)
        if global_step % logging_step == 0:
            print(
                "global step %d, epoch: %d, batch: %d, loss: %f, speed %.2f step/s"
                % (global_step, epoch, step, avg_loss,
                   logging_step / (time.time() - tic_train)))
            tic_train = time.time()
        avg_loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.clear_grad()
        if global_step % save_steps == 0 or global_step == last_step:
            # Only rank 0 evaluates and writes checkpoints.
            if paddle.distributed.get_rank() == 0:
                evaluate(model, loss_fct, metric, test_data_loader, label_num)
                paddle.save(model.state_dict(),
                            os.path.join(output_dir, "model_%d.pdparams" % global_step))


# ----- Evaluate a saved checkpoint -----
init_checkpoint_path = '/home/aistudio/data/model_9500.pdparams'
# BUG FIX: split name was 'text' (typo for 'test').
train_ds, eval_ds = load_dataset('msra_ner', splits=('train', 'test'), lazy=False)
# BUG FIX: the raw examples must be tokenized/label-aligned before
# batchify_fn can pad them; the original fed raw dicts to the DataLoader.
eval_ds = eval_ds.map(trans_func)
eval_data_loader = DataLoader(
    dataset=eval_ds,
    collate_fn=batchify_fn,
    num_workers=0,
    batch_size=8,
    return_list=True
)
model = BertForTokenClassification.from_pretrained(
    pretrained_model_name_or_path=model_name,
    num_classes=label_num
)
if init_checkpoint_path:
    model_dict = paddle.load(init_checkpoint_path)
    model.set_dict(model_dict)

loss_fct = paddle.nn.loss.CrossEntropyLoss(ignore_index=ignore_label)
metric = ChunkEvaluator(label_list=label_list)
model.eval()
metric.reset()
for step, batch in enumerate(eval_data_loader):
    input_ids, token_type_ids, length, labels = batch
    logits = model(input_ids, token_type_ids)
    loss = loss_fct(logits, labels)
    avg_loss = paddle.mean(loss)  # NOTE: reflects the last batch only
    preds = logits.argmax(axis=2)
    num_infer_chunks, num_label_chunks, num_correct_chunks = metric.compute(
        length, preds, labels
    )
    metric.update(num_infer_chunks.numpy(), num_label_chunks.numpy(),
                  num_correct_chunks.numpy())
    # BUG FIX: was 'f1_scroe' (typo, also in the print below).
    precision, recall, f1_score = metric.accumulate()
print("eval loss: %f, precision: %f, recall: %f, f1: %f"
      % (avg_loss, precision, recall, f1_score))

def parse_decodes(input_words, id2label, decodes, lens):
    """Turn batched label-id predictions into readable (word, tag) strings.

    Args:
        input_words: raw examples; input_words[i]['tokens'] is the character
            list for sentence i.
        id2label: mapping from label id to BIO tag string.
        decodes: list of prediction batches (one id sequence per sentence).
        lens: list of length batches, aligned with `decodes`.

    Returns:
        list[str]: one string per sentence, concatenating str((word, tag))
        pairs for the extracted chunks.
    """
    flat_preds = [seq for batch in decodes for seq in batch]
    flat_lens = [n for batch in lens for n in batch]

    results = []
    for i, seq_len in enumerate(flat_lens):
        text = ''.join(input_words[i]['tokens'])
        # Skip position 0 ([CLS]); take ids up to the real sequence length.
        tag_seq = [id2label[tid] for tid in flat_preds[i][1:seq_len]]

        pieces = []
        piece_tags = []
        current = ''
        for ch, tag in zip(text, tag_seq):
            if not (tag.startswith('B-') or tag == 'O'):
                # Inside an entity ('I-...'): keep accumulating characters.
                current += ch
                continue
            # Chunk boundary: flush the accumulated word, record the new tag.
            if current:
                pieces.append(current)
            piece_tags.append(tag.split('-')[1] if tag.startswith('B-') else tag)
            current = ch

        # Flush the trailing word if one is still pending.
        if len(pieces) < len(piece_tags):
            pieces.append(current)
        results.append(''.join(str(pair) for pair in zip(pieces, piece_tags)))

    return results

# ----- Predict on the test split and dump readable results -----
train_ds, predict_ds = load_dataset(
    "msra_ner", splits=("train","test"), lazy=False
)
# Keep the raw (untokenized) examples for rendering the output text.
raw_data = predict_ds.data
id2label = dict(enumerate(predict_ds.label_list))
predict_ds = predict_ds.map(trans_func)
predict_data_loader = DataLoader(
    dataset=predict_ds,
    collate_fn=batchify_fn,
    num_workers=0,
    batch_size=32,
    return_list=True
)

model = BertForTokenClassification.from_pretrained(
    pretrained_model_name_or_path=model_name,
    num_classes=label_num
)

# Restore the fine-tuned weights before predicting.
if init_checkpoint_path:
    model_dict = paddle.load(init_checkpoint_path)
    model.set_dict(model_dict)

model.eval()
pred_list, len_list = [], []
for step, batch in enumerate(predict_data_loader):
    input_ids, token_type_ids, length, labels = batch
    logits = model(input_ids, token_type_ids)
    pred = paddle.argmax(logits, axis=-1)
    pred_list.append(pred.numpy())
    len_list.append(length.numpy())

# Convert batched id predictions into (word, tag) strings per sentence.
preds = parse_decodes(raw_data, id2label, pred_list, len_list)

file_path = 'results.txt'
with open(file_path, 'w', encoding='utf-8') as fout:
    fout.write('\n'.join(preds))

print(
    "The results have been saved in the file: %s, some examples are shown below: "
    % file_path)
print("\n".join(preds[:3]))

