import numpy as np
np.random.seed(1)
import pandas as pd
pd.set_option('display.max_rows', None, 'display.max_columns', None, 'display.max_colwidth', 1000, 'display.expand_frame_repr', False)
from datasets import load_dataset, Dataset
from PyCmpltrtok.common import sep
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
import torch
import evaluate


def preprocess_function(examples):
    """Tokenize question/context pairs, run the QA model to record its
    predicted answer spans, and compute the gold start/end token positions
    from the character-level SQuAD answers.

    Args:
        examples: batched dict with keys "question", "context", "answers"
            (SQuAD format: each answer has "answer_start" and "text" lists).

    Returns:
        BatchEncoding containing the tokenizer outputs plus:
            'start', 'end'                      -- model-predicted token indices;
            'start_positions', 'end_positions'  -- gold token indices, (0, 0)
                when the answer was truncated out of the context window.

    Relies on module-level globals: ``tokenizer``, ``model``, ``dev``.
    """
    questions = [q.strip() for q in examples["question"]]
    inputs = tokenizer(
        questions,
        examples["context"],
        max_length=384,
        truncation="only_second",  # truncate only the context, never the question
        return_offsets_mapping=True,
        padding="max_length",
        return_tensors='pt',
    )

    inputs_dev = {k: v.to(dev) for k, v in inputs.items() if k in set(['input_ids', 'attention_mask'])}
    # Pure inference: no_grad avoids building an autograd graph and
    # wasting GPU memory on activations we never backprop through.
    with torch.no_grad():
        outputs = model(**inputs_dev)
    # Move predictions to CPU so `datasets` can serialize them to Arrow.
    answer_start_index = outputs.start_logits.argmax(axis=1).cpu()
    answer_end_index = outputs.end_logits.argmax(axis=1).cpu()
    inputs['start'] = answer_start_index
    inputs['end'] = answer_end_index

    offset_mapping = inputs.pop("offset_mapping")
    answers = examples["answers"]
    start_positions = []
    end_positions = []

    for i, offset in enumerate(offset_mapping):
        answer = answers[i]
        # start_char / end_char are character offsets within the context string
        start_char = answer["answer_start"][0]
        end_char = answer["answer_start"][0] + len(answer["text"][0])
        sequence_ids = inputs.sequence_ids(i)

        # Find the token span belonging to the context (sequence id == 1)
        idx = 0  # idx is a token index
        while sequence_ids[idx] != 1:
            idx += 1
        context_start = idx
        while sequence_ids[idx] == 1:
            idx += 1
        context_end = idx - 1

        # If the answer is not fully inside the context, label it (0, 0)
        if offset[context_start][0] > end_char or offset[context_end][1] < start_char:
            start_positions.append(0)
            end_positions.append(0)
        else:
            # Otherwise locate the start and end token positions
            idx = context_start  # idx is a token index
            while idx <= context_end and offset[idx][0] <= start_char:
                idx += 1
            start_positions.append(idx - 1)

            idx = context_end
            while idx >= context_start and offset[idx][1] >= end_char:
                idx -= 1
            end_positions.append(idx + 1)

    inputs["start_positions"] = start_positions
    inputs["end_positions"] = end_positions

    return inputs


def get_pred(example):
    """Decode one example's model-predicted token span back into text.

    Expects `example` to carry 'input_ids' plus 'start'/'end' (inclusive
    token indices); uses the module-level `tokenizer` for decoding.
    """
    span_start = example['start']
    span_end = example['end'] + 1  # slice end is exclusive, 'end' is inclusive
    span_ids = example['input_ids'][span_start:span_end]
    decoded = tokenizer.decode(span_ids, skip_special_tokens=True)
    return {'prediction_text': decoded}
    
    
if '__main__' == __name__:
    # model_path = "distilbert-base-uncased"
    # WSL original
    #model_path = "/home/peiyp2004/.cache/huggingface/hub/models--distilbert-base-uncased/snapshots/12040accade4e8a0f71eabdb258fecc2e7e948be"
    # WSL trained
    model_path = "/home/peiyp2004/jupyter_notebook.large.d/distilbert-base-uncased_QA_on_squad_2024_01_01_19_15_44_613229_temp0/checkpoint-2188"

    # ds_path = "squad"
    # WSL
    ds_path = "/home/peiyp2004/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453"

    # metric_path = "squad"
    metric_path = "/home/peiyp2004/.cache/huggingface/modules/evaluate_modules/metrics/evaluate-metric--squad/b4e2dbca455821c7367faa26712f378254b69040ebaab90b64bdeb465e4a304d/squad.py"

    
    sep('Validation data')
    # Small validation subset for a quick sanity-check evaluation.
    squad4val = load_dataset(ds_path, split="validation")
    squad4val_small = squad4val.select(range(40))
    
    sep('Model')
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    print(tokenizer)
    dev = torch.device('cuda:0')
    model = AutoModelForQuestionAnswering.from_pretrained(model_path)
    model = model.to(dev)
    # Disable dropout etc. — without eval() the model predicts with active
    # dropout, making inference nondeterministic and degrading accuracy.
    model.eval()
    print(model)
    
    sep('Infer')
    # preprocess_function both tokenizes and runs the model (batched);
    # get_pred then decodes each predicted span into text.
    squad4val_small_result = squad4val_small.map(preprocess_function, batched=True, batch_size=16)
    ds = squad4val_small_result.map(get_pred, batched=False)
    metric = evaluate.load(metric_path)
    r = metric.compute(predictions=ds.select_columns(['id', 'prediction_text']), references=ds.select_columns(['id', 'answers']))
    print(r)