# coding:utf-8
'''
Waybill (快递单) information extraction: BiGRU-CRF baseline followed by
ERNIE token-classification fine-tuning.
'''

import paddle
import paddle.nn as nn
import paddlenlp as ppnlp
from paddlenlp.data import Pad, Stack, Tuple
from paddlenlp.datasets import MapDataset
from paddlenlp.layers import LinearChainCrf, ViterbiDecoder, LinearChainCrfLoss
from paddlenlp.metrics import ChunkEvaluator
from paddle.utils.download import get_path_from_url
# Download and extract the waybill dataset archive into the working directory.
URL = "https://paddlenlp.bj.bcebos.com/paddlenlp/datasets/waybill.tar.gz"

get_path_from_url(URL,"./")
def convert_token_to_ids(tokens, vocab, oov_token=None):
    """Map each token to its id in *vocab*.

    Unknown tokens fall back to the id of *oov_token*; when no
    *oov_token* is given they map to None.
    """
    fallback = vocab.get(oov_token) if oov_token else None
    return [vocab.get(tok, fallback) for tok in tokens]

def load_dict(dict_path):
    """Read a vocabulary file (one token per line) into a token -> id dict.

    Ids are assigned in file order starting at 0.
    """
    vocab = {}
    with open(dict_path, 'r', encoding='utf-8') as f:
        for i, line in enumerate(f):
            # fix: strip the trailing newline character '\n' — the original
            # stripped the *letter* 'n', leaving '\n' inside every key
            vocab[line.strip('\n')] = i
    return vocab

def load_dataset(datafiles):
    """Build MapDataset(s) from tab-separated waybill split file(s).

    Each data line is "chars<TAB>labels" where both fields are
    '\002'-separated; the first (header) line of each file is skipped.
    Returns one MapDataset for a str path, or a list of MapDatasets for a
    sequence of paths.
    """
    def read(data_path):
        # fix: the keyword is `encoding`, not `encodings` (TypeError)
        with open(data_path, 'r', encoding='utf-8') as fp:
            next(fp)  # skip the header line
            for line in fp:
                words, labels = line.strip('\n').split('\t')
                yield words.split('\002'), labels.split('\002')

    if isinstance(datafiles, str):
        return MapDataset(list(read(datafiles)))
    # fix: the original built this list but never returned it, so the
    # multi-file call below unpacked None
    return [MapDataset(list(read(datafile))) for datafile in datafiles]

# Load the three waybill splits and the label/word vocabularies.
# NOTE(review): paths assume the archive extracted into the working
# directory — confirm against the tarball layout.
train_ds, dev_ds, test_ds = load_dataset(datafiles=('train.txt','dev.txt','test.txt'))
label_vocab = load_dict('tag.dic')
word_vocab = load_dict('word.dic')

def convert_example(example):
    """Convert a (tokens, labels) pair into (token_ids, seq_len, label_ids)."""
    tokens, labels = example
    token_ids = convert_token_to_ids(tokens, word_vocab, 'OOV')
    # fix: the outside tag in tag.dic is the letter 'O', not the digit '0';
    # with '0' every label missing from the vocab silently mapped to None
    labels_ids = convert_token_to_ids(labels, label_vocab, 'O')

    return token_ids, len(token_ids), labels_ids

# Map every example: (tokens, labels) -> (token_ids, seq_len, label_ids).
train_ds = train_ds.map(convert_example)
dev_ds = dev_ds.map(convert_example)
test_ds = test_ds.map(convert_example)

# Build the dataloaders

# Batch collation: pad token ids with the OOV id, stack lengths, and pad
# label ids with the 'O' (outside) tag id.
batchify_fn = lambda samples, fn=Tuple(
    Pad(axis=0,pad_val=word_vocab.get('OOV')), # token_id
    Stack(), # seq_len
    # fix: tag.dic has no 'OOV' entry, so .get('OOV') returned None as the
    # pad value; pad label sequences with the 'O' (outside) tag instead
    Pad(axis=0,pad_val=label_vocab.get('O')), # label_id
) : fn(samples)

# Training loader: shuffled each epoch; the final partial batch is dropped
# so every step sees a full batch of 32.
train_dataloader = paddle.io.DataLoader(
    dataset=train_ds,
    batch_size=32,
    shuffle=True,
    drop_last=True,
    return_list=True,
    collate_fn=batchify_fn
)

# Evaluation loaders. fix: the originals used shuffle=True/drop_last=True,
# which (a) silently drops up to 31 examples from the metrics and (b) for
# the test loader breaks parse_decodes below, which aligns predictions with
# ds.data by index and therefore requires sequential order.
dev_dataloader = paddle.io.DataLoader(
    dataset=dev_ds,
    batch_size=32,
    shuffle=False,
    drop_last=False,
    return_list=True,
    collate_fn=batchify_fn
)

test_dataloader = paddle.io.DataLoader(
    dataset=test_ds,
    batch_size=32,
    shuffle=False,
    drop_last=False,
    return_list=True,
    collate_fn=batchify_fn
)

from paddlenlp.embeddings import TokenEmbedding
# Network definition
class BiGRUWithCRF(nn.Layer):
    """Two-layer bidirectional GRU tagger with a linear-chain CRF on top.

    forward() returns (emissions, lens, viterbi_pred) so that the CRF loss
    and the chunk evaluator can both consume the model outputs.
    """
    def __init__(self,
               emb_size,
               hidden_size,
               word_num,
               label_num,
               use_w2v_emb=False):
        super(BiGRUWithCRF,self).__init__()
        if use_w2v_emb:
            # Pretrained token embeddings extended with the local word dict.
            # NOTE(review): path './conf/word.dic' differs from the
            # 'word.dic' loaded above — confirm which layout is intended.
            self.word_emb = TokenEmbedding(
                extended_vocab_path='./conf/word.dic',
                unknown_token='OOV'
            )
        else:
            self.word_emb = nn.Embedding(word_num, emb_size)

        self.gru = nn.GRU(
            emb_size,
            hidden_size,
            num_layers=2,
            direction='bidirectional'
        )
        # label_num + 2: LinearChainCrf adds start/stop tags by default, so
        # the emission layer must also produce scores for those two tags.
        self.fc = nn.Linear(hidden_size * 2, label_num + 2)
        self.crf = LinearChainCrf(label_num)
        self.decoder = ViterbiDecoder(self.crf.transitions)

    def forward(self, x, lens):
        # x: token ids [batch, seq]; lens: true sequence lengths [batch]
        embs = self.word_emb(x)
        output, _ = self.gru(embs)
        output = self.fc(output)
        # Viterbi decode over the emission scores; pred holds tag ids.
        _, pred = self.decoder(output, lens)
        return output, lens,pred

# Build and train the BiGRU-CRF model with the high-level Model API.
network = BiGRUWithCRF(300,300,len(word_vocab), len(label_vocab))
model = paddle.Model(network)

optimizer = paddle.optimizer.Adam(
    learning_rate=0.001, parameters=model.parameters()
)
# CRF negative log-likelihood loss, sharing the network's CRF parameters.
crf_loss = LinearChainCrfLoss(network.crf)
# suffix=True: tag scheme puts the position after the type, e.g. 'P-B'/'P-I'.
chunk_evaluator = ChunkEvaluator(label_list=label_vocab.keys(), suffix=True)
model.prepare(optimizer, crf_loss, chunk_evaluator)

model.fit(train_dataloader,dev_dataloader,epochs=1,save_dir='results', log_freq=1)

model.evaluate(eval_data=test_dataloader, log_freq=1)

# Prediction
def parse_decodes(ds, decodes, lens, label_vocab):
    """Convert batched tag-id predictions into readable entity strings.

    ds: dataset whose .data[idx][0] holds the raw character sequence.
    decodes / lens: per-batch lists of predicted tag ids and true lengths.
    Returns one string per example, e.g. "('张三', 'P')('在', 'O')".
    """
    # flatten the per-batch structure into flat per-example lists
    decodes = [x for batch in decodes for x in batch]
    lens = [x for batch in lens for x in batch]

    # invert the label vocab: id -> tag name
    id_label = dict(zip(label_vocab.values(), label_vocab.keys()))

    outputs = []
    for idx, end in enumerate(lens):
        sent = ds.data[idx][0][:end]
        tags = [id_label[x] for x in decodes[idx][:end]]
        sent_out = []
        tags_out = []
        words = ''
        for s, i in zip(sent, tags):
            # fix: the outside tag is the letter 'O', not the digit '0';
            # a '-B' tag or an 'O' tag starts a new span
            if i.endswith('-B') or i == 'O':
                if len(words):
                    sent_out.append(words)
                tags_out.append(i.split('-')[0])
                words = s
            else:
                words += s
        # flush the last open span
        if len(sent_out) < len(tags_out):
            sent_out.append(words)
        outputs.append(''.join(
            [str((s, t)) for s, t in zip(sent_out, tags_out)]
        ))
    return outputs



# Run prediction with the trained BiGRU-CRF wrapper; the network's forward
# returns (emissions, lens, viterbi_pred), so predict yields three lists.
# fix: the Model wrapper is named `model`, not `model1` (NameError)
outputs, lens, decodes = model.predict(test_data=test_dataloader)
preds = parse_decodes(test_ds, decodes, lens, label_vocab)


def convert_example_pre(example, tokenizer, label_vocab):
    """Tokenize a (tokens, labels) pair for ERNIE token classification.

    The tokenizer adds special tokens at both ends, so the label sequence
    is padded with an 'O' tag on each side to stay aligned with input_ids.
    Returns (input_ids, token_type_ids, seq_len, label_ids).
    """
    tokens, labels = example
    tokenized_input = tokenizer(
        tokens, return_length=True, is_split_into_words=True
    )
    labels = ['O'] + labels + ['O']
    # fix: `lables` was a typo (NameError); debug prints removed
    tokenized_input['labels'] = [label_vocab[x] for x in labels]
    return tokenized_input['input_ids'], tokenized_input[
        'token_type_ids'], tokenized_input['seq_len'], tokenized_input['labels']

from functools import partial
from paddlenlp.transformers import ErnieTokenizer, ErnieForTokenClassification

# Prepare the ERNIE tokenization transform and apply it to the datasets.
label_vocab = load_dict('tag.dic')
tokenizer = ErnieTokenizer.from_pretrained('ernie-1.0')
trans_func = partial(convert_example_pre, tokenizer=tokenizer, label_vocab=label_vocab)
# NOTE(review): train_ds/dev_ds/test_ds were already mapped by
# convert_example above, so their examples are (token_ids, len, label_ids)
# rather than raw (tokens, labels) — the raw splits should be reloaded
# before applying this transform. TODO confirm against the data pipeline.
train_ds.map(trans_func)
dev_ds.map(trans_func)
test_ds.map(trans_func)

print(train_ds[0])

# Label padding value ignored by the loss (CrossEntropyLoss ignore_index).
ignore_label = -1
batchify_fn_pre = lambda samples, fn=Tuple(
    # fix: the tokenizer attribute is `pad_token_id`, not `pad_tokey_id`
    Pad(axis=0,pad_val=tokenizer.pad_token_id), # input_ids
    Pad(axis=0,pad_val=tokenizer.pad_token_type_id), # token_type_ids
    Stack(), # seq_len
    Pad(axis=0,pad_val=ignore_label), # labels
): fn(samples)

# DataLoaders for the ERNIE fine-tuning stage (sequential order; eval
# loaders must stay unshuffled so predictions align with ds.data by index).
train_loader = paddle.io.DataLoader(
    dataset=train_ds,
    batch_size=36,
    return_list=True,
    collate_fn=batchify_fn_pre
)

dev_loader = paddle.io.DataLoader(
    dataset=dev_ds,
    batch_size=36,
    return_list=True,
    collate_fn=batchify_fn_pre
)

test_loader = paddle.io.DataLoader(
    dataset=test_ds,
    batch_size=36,
    return_list=True,
    collate_fn=batchify_fn_pre
)

@paddle.no_grad()
def evaluate(model, metric, data_loader):
    """Run chunk-level evaluation over *data_loader* and print P/R/F1.

    Puts the model in eval mode for the pass and back in train mode after.
    """
    model.eval()
    metric.reset()
    precision = recall = f1_score = 0.0  # defined even if the loader is empty
    for input_ids, seg_ids, lens, labels in data_loader:
        logits = model(input_ids, seg_ids)
        # fix: logits are [batch, seq_len, num_classes] — argmax must be over
        # the class axis (-1), not the sequence axis (1); matches predict()
        preds = paddle.argmax(logits, axis=-1)
        n_infer, n_label, n_correct = metric.compute(None, lens, preds, labels)
        metric.update(n_infer.numpy(), n_label.numpy(), n_correct.numpy())
        precision, recall, f1_score = metric.accumulate()
    print("eval precision: %f - recall: %f - f1: %f"
          %(precision, recall, f1_score))
    model.train()

def predict(model, data_loader, ds, label_vocab):
    """Collect per-token argmax predictions over *data_loader* and decode
    them into readable entity strings via parse_decodes."""
    batch_preds, batch_lens = [], []
    for input_ids, seg_ids, lens, _unused_labels in data_loader:
        batch_logits = model(input_ids, seg_ids)
        batch_preds.append(paddle.argmax(batch_logits, axis=-1).numpy())
        batch_lens.append(lens.numpy())
    return parse_decodes(ds, batch_preds, batch_lens, label_vocab)




# Fine-tune ERNIE for token classification on the waybill data.
model = ErnieForTokenClassification.from_pretrained('ernie-3.0-medium-zh', num_classes=len(label_vocab))
metric = ChunkEvaluator(label_list=label_vocab.keys(), suffix=True)
# ignore_index skips the padded label positions (ignore_label = -1 above).
loss_fn = paddle.nn.loss.CrossEntropyLoss(ignore_index=ignore_label)
optimizer = paddle.optimizer.AdamW(learning_rate=2e-5, parameters=model.parameters())

step = 0
for epoch in range(10):
    for idx, (input_ids, token_type_ids, length, labels) in enumerate(train_loader):
        logits = model(input_ids, token_type_ids)
        loss = paddle.mean(loss_fn(logits, labels))
        loss.backward()
        optimizer.step()
        optimizer.clear_grad()
        step += 1
        # fix: "epch" typo and malformed format spec ".5%f" -> "%.5f";
        # convert the loss tensor to a plain float for formatting
        print("epoch: %d, step: %d - loss: %.5f" % (epoch, step, float(loss)))
