import evaluate
import os
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForTokenClassification, TrainingArguments, Trainer, DataCollatorForTokenClassification
import torch
from bdtime import show_json, show_ls
import numpy as np
from collections import OrderedDict
from transformers import pipeline


# BIO label inventory for the token-classification head: the 'O' tag plus
# a B_/I_ pair for each bilingual article field (13 labels total).
_entity_fields = [
    'cn_title', 'cn_authors', 'cn_institutions',
    'en_title', 'en_authors', 'en_institutions',
]
label_list = ['O']
for _field in _entity_fields:
    label_list.append(f'B_{_field}')
    label_list.append(f'I_{_field}')


# HuggingFace hub access goes through a local proxy by default; if a
# previously fine-tuned model is cached on disk we switch to fully
# offline loading instead.
_proxy_url = "http://127.0.0.1:7890"
proxies = {"http": _proxy_url, "https": _proxy_url}
local_files_only = False

# tokenizer_name = "hfl/chinese-macbert-base"
# model_name = "hfl/chinese-macbert-base"
tokenizer_name = "OctopusMind/longbert-8k-zh"
model_name = tokenizer_name

save_dir = 'tempdir/ner_models/article_2048'
if os.path.exists(save_dir):
    # A locally saved model takes priority: no proxy, no hub traffic.
    print('====== save dir is exists!')
    model_name = save_dir
    local_files_only = True
    proxies = None

# The tokenizer is always resolved by hub name (never from save_dir).
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, proxies=proxies, local_files_only=local_files_only)


# Optionally resume from the most recently modified training checkpoint.
use_last_checkpoint = True
if use_last_checkpoint:
    checkpoints_dir = r'E:\projects\pycharmProjects\article_structure_analysis\checkpoints'
    if os.path.exists(checkpoints_dir):
        entries = os.listdir(checkpoints_dir)
        if entries:
            # Sort ascending by modification time; the newest is last.
            by_mtime = sorted(entries, key=lambda name: os.path.getmtime(os.path.join(checkpoints_dir, name)))
            last_checkpoint = os.path.join(checkpoints_dir, by_mtime[-1])
            print('--- use_last_checkpoint:', last_checkpoint)
            model_name = last_checkpoint

# Load the token-classification model with a 13-way BIO head sized to
# label_list (either the hub base model or a resumed checkpoint).
model = AutoModelForTokenClassification.from_pretrained(model_name, num_labels=len(label_list), proxies=proxies, local_files_only=local_files_only)

# Fall back to CPU so the script still runs on machines without CUDA.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Record readable label names on the config (both directions, kept in
# sync) so downstream tooling reports e.g. 'B_cn_title' not 'LABEL_1'.
model.config.id2label = {idx: label for idx, label in enumerate(label_list)}
model.config.label2id = {label: idx for idx, label in enumerate(label_list)}

# res = ner_pipe("小明在北京上班")

# text = "小明在北京上班"
# tokenized_exmaples = tokenizer(text, max_length=512, padding=True, truncation=True, is_split_into_words=False)
# res = ner_pipe(tokenized_exmaples)

#
#
# ner_result = {}
# x = text
# for r in res:
#     if r["entity_group"] not in ner_result:
#         ner_result[r["entity_group"]] = []
#     ner_result[r["entity_group"]].append(x[r["start"]: r["end"]])
# print(ner_result)
# show_json(ner_result)


def analysis_text(text, target_labels=None):
    """Run NER inference over ``text`` and print the span found for each target label.

    Args:
        text: raw article string to analyze (tokenized internally, truncated
            to 2048 tokens).
        target_labels: indices of I-tags in ``label_list``; each is paired
            with its B-tag at ``index - 1``. Defaults to [2, 4, 6, 8]
            (cn_title / cn_authors / cn_institutions / en_title).
    """
    encoded = tokenizer(text, max_length=2048, padding=True, truncation=True, is_split_into_words=False)

    # Batch of one; move every tensor to the model's device.
    input_ids = torch.LongTensor([encoded['input_ids']]).to(device)
    attention_mask = torch.LongTensor([encoded['attention_mask']]).to(device)
    token_type_ids = torch.LongTensor([encoded['token_type_ids']]).to(device)

    # Inference only: disable autograd to avoid building a graph, and call
    # the model directly (not .forward) so hooks are honored.
    with torch.no_grad():
        outputs = model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)

    predictions = outputs.logits.detach().cpu().numpy()
    predictions = np.argmax(predictions, axis=-1)[0]

    target_labels = target_labels if target_labels else [2, 4, 6, 8]
    # token position -> word index in the original text; hoisted out of the
    # loop since it does not depend on the label being extracted.
    # NOTE(review): indexing `text` by word index assumes one character per
    # word (char-level Chinese tokenization) — confirm for this tokenizer.
    word_ids = encoded.word_ids()
    for label_i in target_labels:
        # Tokens predicted as either B_<field> (label_i - 1) or I_<field> (label_i).
        indexes = np.where(np.logical_or(predictions == label_i - 1, predictions == label_i))[0]
        span_word_ids = [word_ids[i] for i in indexes]
        # De-duplicate while preserving order; None marks special tokens.
        span_word_ids = list(OrderedDict.fromkeys(span_word_ids))
        span_chars = [text[i] for i in span_word_ids if i is not None]

        print(f'--- text target ner [{label_list[label_i - 1], label_list[label_i]}]:\n', ''.join(span_chars))


ner_pipe = pipeline("token-classification", model=model, tokenizer=tokenizer, device=0, aggregation_strategy="simple")


if __name__ == '__main__':
    import ast

    import pandas as pd

    # Smoke-test the model on one sample of the generated style_1 dataset.
    input_csv = "tempdir/outputs/style_1/style_1__n_1000.csv"
    df = pd.read_csv(input_csv)
    # Drop pandas' leftover index column if the CSV was saved with one.
    df = df.drop(columns=['Unnamed: 0'], errors='ignore')

    k = 4  # which sample row to inspect

    # region # --- test against ground truth
    text = df['text'][k]
    # Labels are stored as stringified Python lists; literal_eval is the
    # safe replacement for eval() on file-sourced data.
    true_labels = np.array(ast.literal_eval(df['label'][k]))
    true_span = ast.literal_eval(df['label_span'][k])

    print(text)
    print('\n======= text ============')

    target_labels = [2, 4, 6, 8, 10, 12]
    print('--- target_labels:', [label_list[l] for l in target_labels])

    print('======= true_labels ============')
    for label_i in target_labels:
        # Characters whose gold label is B_<field> (label_i - 1) or I_<field> (label_i).
        indexes = np.where(np.logical_or(true_labels == label_i - 1, true_labels == label_i))[0]
        title_string = [text[i] for i in indexes if i is not None]
        print(f'~~~ text true_labels [{label_list[label_i - 1], label_list[label_i]}]:\n', ''.join(title_string))

    print('\n======= analysis_text ============')
    analysis_text(text, target_labels)
    # endregion


# from transformers import BertForTokenClassification
# test = BertForTokenClassification()
