import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from ast import literal_eval
from itertools import chain
from sklearn.metrics import precision_recall_fscore_support
from tqdm import tqdm
from sklearn.model_selection import StratifiedKFold

import torch
from transformers import AutoModel, AutoTokenizer

ROOT = "./data/nbme-score-clinical-patient-notes"


def create_train_df(debug=False):
    """Load the NBME csv files, merge them, clean text, and assign CV folds.

    Reads features.csv, patient_notes.csv and train.csv from ROOT, parses the
    stringified annotation/location columns, drops rows whose annotation is
    empty, lower-cases the text columns, and stamps a 5-fold StratifiedKFold
    assignment stratified on the (case_num, feature_num) combination.

    Args:
        debug: if True, keep only a random half of the rows.

    Returns:
        The merged, processed pandas DataFrame with a "fold" column.
    """
    features = pd.read_csv(f"{ROOT}/features.csv")
    patient_notes = pd.read_csv(f"{ROOT}/patient_notes.csv")
    df = pd.read_csv(f"{ROOT}/train.csv")

    # annotation/location are stored as stringified Python lists.
    df["annotation_list"] = df["annotation"].apply(literal_eval)
    df["location_list"] = df["location"].apply(literal_eval)
    df = df.merge(patient_notes, how="left").merge(features, how="left")
    # Keep only annotated rows (comment out if you train all samples).
    df = df[df["annotation"] != "[]"].copy().reset_index(drop=True)

    # "-OR-" separates alternative phrasings; turn it into ";-" and
    # replace remaining hyphens with spaces.
    df["feature_text"] = df["feature_text"].apply(
        lambda text: text.replace("-OR-", ";-").replace("-", " ")
    )
    df["feature_text"] = df["feature_text"].str.lower()
    df["pn_history"] = df["pn_history"].str.lower()

    if debug:
        df = df.sample(frac=0.5).reset_index(drop=True)

    # Assign each row to one of 5 validation folds.
    skf = StratifiedKFold(n_splits=5)
    df["stratify_on"] = df["case_num"].astype(str) + df["feature_num"].astype(str)
    df["fold"] = -1
    for fold_idx, (_, valid_idx) in enumerate(skf.split(df["id"], y=df["stratify_on"])):
        df.loc[valid_idx, "fold"] = fold_idx

    print(df.shape)
    return df


# Build the training dataframe and inspect the first row.
# BUGFIX: the dataframe was previously built twice (a second identical call
# followed the inspection loop); create_train_df is deterministic outside
# debug mode, so the duplicate call only wasted time and is removed.
df = create_train_df()
first = df.loc[0]
example = {
    "feature_text": first.feature_text,
    "pn_history": first.pn_history,
    "location_list": first.location_list,
    "annotation_list": first.annotation_list
}
for key, value in example.items():
    print(key)
    print(value)
    print("=" * 100)


def loc_list_to_ints(loc_list):
    """Parse location strings like "5 14" or "5 14;27 33" into (start, end) int pairs.

    Each entry in `loc_list` may contain several ";"-separated spans; every
    span is a "start end" pair of character offsets.
    """
    spans = []
    for entry in loc_list:
        for piece in entry.split(";"):
            start_str, end_str = piece.split()
            spans.append((int(start_str), int(end_str)))
    return spans


# Demonstrate converting a location list into integer spans and slicing the
# corresponding annotated text out of the patient note.
print(example["location_list"])
example_loc_ints = loc_list_to_ints(example["location_list"])[0]
print(example_loc_ints)
print(example["pn_history"][example_loc_ints[0]: example_loc_ints[1]])

# Path to a locally downloaded bert-base-uncased checkpoint.
MODEL_NAME = './data/bert-base-uncased'
# Tokenizer (fast variant required: offset mappings are used below).
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=True)


def tokenize_and_add_labels(tokenizer, example):
    """Tokenize a (feature_text, pn_history) pair and build token-level labels.

    The feature text is encoded as the first segment and the patient note as
    the second.  A token is labeled 1.0 when its character span lies entirely
    inside one of the annotated spans, 0.0 for other patient-note tokens, and
    -100 for special tokens and feature-text tokens so they can be masked out
    of the loss.

    Args:
        tokenizer: a fast HuggingFace tokenizer (offset mapping required).
        example: mapping with "feature_text", "pn_history", "location_list".

    Returns:
        The tokenizer output extended with "location_int", "sequence_ids"
        and "labels".
    """
    tokenized_inputs = tokenizer(
        example["feature_text"],
        example["pn_history"],
        truncation="only_second",
        max_length=416,  # max length is 406
        padding="max_length",
        return_offsets_mapping=True
    )
    labels = [0.0] * len(tokenized_inputs["input_ids"])
    tokenized_inputs["location_int"] = loc_list_to_ints(example["location_list"])
    # BUGFIX: use the real segment ids (None for special tokens, 0 for the
    # feature text, 1 for the patient note).  The original code stored the
    # input ids here, so the masking logic below operated on token ids
    # instead of segment ids.
    tokenized_inputs["sequence_ids"] = tokenized_inputs.sequence_ids()

    for idx, (seq_id, offsets) in enumerate(zip(tokenized_inputs["sequence_ids"], tokenized_inputs["offset_mapping"])):
        if seq_id is None or seq_id == 0:
            # Special token or feature-text token: exclude from the loss.
            labels[idx] = -100
            continue
        token_start, token_end = offsets
        for feature_start, feature_end in tokenized_inputs["location_int"]:
            # Positive label when the token lies fully inside an annotation.
            if token_start >= feature_start and token_end <= feature_end:
                labels[idx] = 1.0
                break
    tokenized_inputs["labels"] = labels

    return tokenized_inputs


# Sanity-check the tokenization/labeling on the example row and dump every field.
tokenized_inputs = tokenize_and_add_labels(tokenizer, example)

for key in tokenized_inputs.keys():
    print(key)
    print(tokenized_inputs[key])
    print("=" * 100)

# Dataset
class NBMEData(torch.utils.data.Dataset):
    """Dataset over the merged dataframe; tokenizes one row per item."""

    def __init__(self, data, tokenizer):
        self.data = data
        self.tokenizer = tokenizer

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        row = self.data.loc[idx]
        encoded = tokenize_and_add_labels(self.tokenizer, row)

        # Model inputs for BERT.
        ids = np.array(encoded["input_ids"])
        mask = np.array(encoded["attention_mask"])
        # Targets for the loss and the CV score.
        target = np.array(encoded["labels"])
        # Extra per-token arrays needed only for the character-level CV score.
        offsets = np.array(encoded["offset_mapping"])
        seq_ids = np.array(encoded["sequence_ids"]).astype("float16")

        return ids, mask, target, offsets, seq_ids


# Model
class NBMEModel(torch.nn.Module):
    """BERT backbone + dropout + a per-token binary classification head."""

    def __init__(self):
        super().__init__()
        # Pretrained encoder; its last hidden state is (batch, seq_len, 768).
        self.backbone = AutoModel.from_pretrained(MODEL_NAME)
        self.dropout = torch.nn.Dropout(p=0.2)
        # One logit per token for the span / no-span decision.
        self.classifier = torch.nn.Linear(768, 1)

    def forward(self, input_ids, attention_mask):
        # .last_hidden_state is the same tensor as outputs[0] for HF encoders.
        hidden = self.backbone(
            input_ids=input_ids, attention_mask=attention_mask
        ).last_hidden_state
        return self.classifier(self.dropout(hidden)).squeeze(-1)


# Training configuration for a single cross-validation fold.
fold = 0
BATCH_SIZE = 16
EPOCHS = 3
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

model = NBMEModel().to(DEVICE)
# NOTE(review): `criterion` appears unused in this file — the train/eval loops
# build their own BCEWithLogitsLoss(reduction="none") below; confirm and remove.
criterion = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)

# Hold out the chosen fold for validation and train on the rest.
train = df.loc[df["fold"] != fold].reset_index(drop=True)
valid = df.loc[df["fold"] == fold].reset_index(drop=True)
train_ds = NBMEData(train, tokenizer)
valid_ds = NBMEData(valid, tokenizer)
train_dl = torch.utils.data.DataLoader(train_ds, batch_size=BATCH_SIZE, pin_memory=True, shuffle=True, drop_last=True)
valid_dl = torch.utils.data.DataLoader(valid_ds, batch_size=BATCH_SIZE * 2, pin_memory=True, shuffle=False,
                                       drop_last=False)


class AverageMeter(object):
    """Tracks the latest value and the running mean of a scalar metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count


def sigmoid(z):
    """Element-wise logistic function 1 / (1 + e^-z) via numpy broadcasting."""
    neg_exp = np.exp(-z)
    return 1.0 / (1.0 + neg_exp)


def get_location_predictions(preds, offset_mapping, sequence_ids, test=False):
    """Decode per-token logits into character-level span predictions.

    Tokens whose predicted probability exceeds 0.5 are merged into contiguous
    character spans; tokens outside the patient-note segment (sequence id
    None or 0) are skipped.

    Args:
        preds: per-example sequences of raw token logits.
        offset_mapping: per-token (char_start, char_end) pairs per example.
        sequence_ids: per-token segment ids per example.
        test: if True, return submission-style "start end[; start end...]"
            strings; otherwise lists of (start, end) tuples.

    Returns:
        One decoded prediction per example.
    """
    all_predictions = []
    for pred, offsets, seq_ids in zip(preds, offset_mapping, sequence_ids):
        # Logits -> probabilities (logistic function, inlined).
        probs = 1 / (1 + np.exp(-np.asarray(pred)))
        start_idx = None
        end_idx = None
        current_preds = []
        for p, offset, s_id in zip(probs, offsets, seq_ids):
            if s_id is None or s_id == 0:
                continue
            if p > 0.5:
                if start_idx is None:
                    start_idx = offset[0]
                end_idx = offset[1]
            elif start_idx is not None:
                # Probability dropped below threshold: close the open span.
                if test:
                    current_preds.append(f"{start_idx} {end_idx}")
                else:
                    current_preds.append((start_idx, end_idx))
                start_idx = None
        # BUGFIX: flush a span that is still open when the tokens run out;
        # previously a positive span ending at the last token was dropped.
        if start_idx is not None:
            if test:
                current_preds.append(f"{start_idx} {end_idx}")
            else:
                current_preds.append((start_idx, end_idx))
        if test:
            all_predictions.append("; ".join(current_preds))
        else:
            all_predictions.append(current_preds)
    return all_predictions


def calculate_char_CV(predictions, offset_mapping, sequence_ids, labels):
    """Character-level precision/recall/F1 over decoded span predictions.

    Expands the token labels and the predicted spans into per-character
    binary masks and scores them with binary precision/recall/F1 pooled
    across all examples.

    Args:
        predictions: per-example lists of (start, end) character spans.
        offset_mapping: per-token (char_start, char_end) pairs per example.
        sequence_ids: per-token segment ids per example.
        labels: per-token label sequences (1 = inside annotation, -100 = masked).

    Returns:
        dict with "precision", "recall" and "f1".
    """
    all_labels = []
    all_preds = []
    # BUGFIX(clarity): the inner loop variable previously shadowed the
    # `labels` argument; renamed to token_labels.
    for preds, offsets, seq_ids, token_labels in zip(
            predictions, offset_mapping, sequence_ids, labels):
        # Largest character offset bounds the relevant text length.
        num_chars = max(chain.from_iterable(offsets))
        char_labels = np.zeros(num_chars)
        for o, s_id, label in zip(offsets, seq_ids, token_labels):
            if s_id is None or s_id == 0:
                continue  # special or feature-text token
            if int(label) == 1:
                char_labels[o[0]:o[1]] = 1
        char_preds = np.zeros(num_chars)
        for start_idx, end_idx in preds:
            char_preds[start_idx:end_idx] = 1
        all_labels.extend(char_labels)
        all_preds.extend(char_preds)
    results = precision_recall_fscore_support(all_labels, all_preds, average="binary")
    return {
        "precision": results[0],
        "recall": results[1],
        "f1": results[2]
    }


def compute_metrics(p):
    """Token-level precision/recall/F1 from (predictions, labels), skipping -100 positions."""
    predictions, y_true = p
    y_true = y_true.astype(int)
    # Threshold each retained token's score at 0.5.
    y_pred = []
    for pred_row, label_row in zip(predictions, y_true):
        y_pred.append(
            [int(score > 0.5) for score, lab in zip(pred_row, label_row) if lab != -100]
        )
    # Drop the masked (-100) positions from the references as well.
    y_true = [[lab for lab in label_row if lab != -100] for label_row in y_true]
    flat_true = list(chain(*y_true))
    flat_pred = list(chain(*y_pred))
    results = precision_recall_fscore_support(flat_true, flat_pred, average="binary")
    return {
        "token_precision": results[0],
        "token_recall": results[1],
        "token_f1": results[2]
    }


# Per-epoch loss history and best-checkpoint tracking.
history = {"train": [], "valid": []}
best_loss = np.inf

# Unreduced BCE so that positions labelled -100 (special/feature-text tokens)
# can be masked out before averaging; built once, outside the loops.
loss_fct = torch.nn.BCEWithLogitsLoss(reduction="none")

for epoch in range(EPOCHS):
    # ---- training ----
    model.train()
    train_loss = AverageMeter()
    pbar = tqdm(train_dl)
    for batch in pbar:
        optimizer.zero_grad()
        input_ids = batch[0].to(DEVICE)
        attention_mask = batch[1].to(DEVICE)
        labels = batch[2].to(DEVICE)
        logits = model(input_ids, attention_mask)
        loss = loss_fct(logits, labels)
        # Average only over "pn_history" tokens; masked labels are -100 < -1.
        loss = torch.masked_select(loss, labels > -1).mean()
        loss.backward()
        optimizer.step()
        train_loss.update(val=loss.item(), n=len(input_ids))
        pbar.set_postfix(Loss=train_loss.avg)
    print(epoch, train_loss.avg)
    history["train"].append(train_loss.avg)

    # ---- evaluation ----
    model.eval()
    valid_loss = AverageMeter()
    with torch.no_grad():
        # BUGFIX: give validation its own progress bar; the old code updated
        # the stale *training* bar from inside this loop.
        pbar = tqdm(valid_dl)
        for batch in pbar:
            input_ids = batch[0].to(DEVICE)
            attention_mask = batch[1].to(DEVICE)
            labels = batch[2].to(DEVICE)
            logits = model(input_ids, attention_mask)
            loss = loss_fct(logits, labels)
            loss = torch.masked_select(loss, labels > -1).mean()
            valid_loss.update(val=loss.item(), n=len(input_ids))
            pbar.set_postfix(Loss=valid_loss.avg)
    print(epoch, valid_loss.avg)
    history["valid"].append(valid_loss.avg)

    # Checkpoint whenever validation loss improves.
    if valid_loss.avg < best_loss:
        best_loss = valid_loss.avg
        torch.save(model.state_dict(), "nbme.pth")

# Restore the best checkpoint saved during training.
model.load_state_dict(torch.load("nbme.pth", map_location=DEVICE))

# Run inference over the validation fold, collecting logits plus the
# per-token offsets/segment ids/labels needed for the character-level score.
model.eval()
preds = []
offsets = []
seq_ids = []
lbls = []
with torch.no_grad():
    for batch in tqdm(valid_dl):
        input_ids = batch[0].to(DEVICE)
        attention_mask = batch[1].to(DEVICE)
        labels = batch[2].to(DEVICE)
        offset_mapping = batch[3]
        sequence_ids = batch[4]
        logits = model(input_ids, attention_mask)
        preds.append(logits.cpu().numpy())
        offsets.append(offset_mapping.numpy())
        seq_ids.append(sequence_ids.numpy())
        lbls.append(labels.cpu().numpy())
# Stack the per-batch arrays into one array per quantity.
preds = np.concatenate(preds, axis=0)
offsets = np.concatenate(offsets, axis=0)
seq_ids = np.concatenate(seq_ids, axis=0)
lbls = np.concatenate(lbls, axis=0)
# Decode spans from the raw logits and score them at character level.
location_preds = get_location_predictions(preds, offsets, seq_ids, test=False)
score = calculate_char_CV(location_preds, offsets, seq_ids, lbls)
print(score)
