import torch.utils.data
from transformers import AutoTokenizer, default_data_collator, AutoModelForQuestionAnswering, DistilBertModel, get_scheduler
from datasets import load_from_disk, load_dataset

import dltools

# Sanity-check two equivalent ways of computing the QA span loss on toy data:
# averaging separate start/end cross-entropies vs. one CrossEntropyLoss call
# on the stacked (N, seq, 2) logits with an (N, 2) target.
test_loss_fn = torch.nn.CrossEntropyLoss()

# Toy logits: 3 samples, 5 token positions, 2 scores per position
# (column 0 = start score, column 1 = end score).
test_pred = torch.tensor([
    [[0.1, 0.9], [0.2, 0.3], [0.5, 0.11], [0.12, 0.91], [0.13, 0.97]],
    [[0.12, 0.19], [0.61, 0.96], [0.16, 0.99], [0.16, 0.93], [0.12, 0.29]],
    [[0.55, 0.59], [0.51, 0.78], [0.41, 0.49], [0.14, 0.94], [0.71, 0.79]],
])
test_label_start = torch.tensor([1, 3, 4])
test_label_end = torch.tensor([2, 4, 4])
print(f'test_pred.shape:{test_pred.shape}')
print(f'test_label_start.shape:{test_label_start.shape}')
print(f'test_label_end.shape:{test_label_end.shape}')

# Way 1: split into start/end logits and average the two losses.
test_pred_start = test_pred[..., 0]
test_pred_end = test_pred[..., 1]
test_start_loss = test_loss_fn(test_pred_start, test_label_start)
test_end_loss = test_loss_fn(test_pred_end, test_label_end)
loss_1 = (test_start_loss + test_end_loss) / 2
print(f'loss_1:{loss_1}')

# Way 2: CrossEntropyLoss treats (N, seq, 2) input as (N, C=seq, d=2)
# against an (N, 2) target of (start, end) indices — same mean.
test_label = positions = torch.cat(
    (test_label_start.reshape(-1, 1), test_label_end.reshape(-1, 1)), dim=1)
loss_2 = test_loss_fn(test_pred, test_label)
print(f'loss_2:{loss_2}')
print(f'test_pred.shape:{test_pred.shape}')
print(f'test_label.shape:{test_label.shape}')
test_pred = test_pred.permute(0, 2, 1)
# loss_3 = test_loss_fn(test_pred, test_label)
# print(f'loss_3:{loss_3}')

# Load the tokenizer (use a mirror if hf.co is unreachable):
# HF_ENDPOINT=https://hf-mirror.com
tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased', cache_dir="./MNIST/cache", force_download=False)
print(tokenizer)
print(tokenizer('What is your name?', 'My name is Sylvain.'))
# Load the SQuAD dataset from local disk.
dataset = load_from_disk("./MNIST/squad")
# Subsample both splits to keep the experiment fast.
dataset['train'] = dataset['train'].shuffle().select(range(20000))
dataset['validation'] = dataset['validation'].shuffle().select(range(1000))


def prepare_train_features(examples):
    """Tokenize a batch of SQuAD examples and attach start/end token labels.

    Long contexts are split into overlapping 384-token windows (stride 128),
    so one input example may produce several features. For each feature the
    answer's character span is mapped to token indices via the offset
    mapping; if the answer is absent or falls outside the window, both
    positions are set to the CLS token index.

    Args:
        examples: batch dict with 'question', 'context', and 'answers' keys
            (standard SQuAD schema — assumed from usage; verify upstream).

    Returns:
        The tokenized batch with added 'start_positions' / 'end_positions'.
    """
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples["question"] = [q.lstrip() for q in examples["question"]]

    # Tokenize our examples with truncation and padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples['question'],
        examples['context'],
        truncation='only_second',
        max_length=384,
        stride=128,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding='max_length',
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # The offset mappings will give us a map from token to character position in the original context. This will
    # help us compute the start_positions and end_positions.
    offset_mapping = tokenized_examples.pop("offset_mapping")

    # Let's label those examples!
    tokenized_examples["start_positions"] = []
    tokenized_examples["end_positions"] = []

    for i, offsets in enumerate(offset_mapping):
        # We will label impossible answers with the index of the CLS token.
        input_ids = tokenized_examples["input_ids"][i]
        cls_index = input_ids.index(tokenizer.cls_token_id)

        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        answers = examples["answers"][sample_index]
        # If no answers are given, set the cls_index as answer.
        if len(answers["answer_start"]) == 0:
            tokenized_examples["start_positions"].append(cls_index)
            tokenized_examples["end_positions"].append(cls_index)
        else:
            # Start/end character index of the answer in the text.
            start_char = answers["answer_start"][0]
            end_char = start_char + len(answers["text"][0])

            # Start token index of the current span in the text.
            token_start_index = 0
            while sequence_ids[token_start_index] != 1:
                token_start_index += 1

            # End token index of the current span in the text.
            token_end_index = len(input_ids) - 1
            while sequence_ids[token_end_index] != 1:
                token_end_index -= 1

            # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
            if not (offsets[token_start_index][0] <= start_char
                    and offsets[token_end_index][1] >= end_char):
                tokenized_examples["start_positions"].append(cls_index)
                tokenized_examples["end_positions"].append(cls_index)
            else:
                # Otherwise move the token_start_index and token_end_index to the two ends of the answer.
                # Note: we could go after the last offset if the answer is the last word (edge case).
                while token_start_index < len(offsets) and offsets[
                    token_start_index][0] <= start_char:
                    token_start_index += 1
                tokenized_examples["start_positions"].append(
                    token_start_index - 1)
                while offsets[token_end_index][1] >= end_char:
                    token_end_index -= 1
                tokenized_examples["end_positions"].append(token_end_index + 1)

    return tokenized_examples


# Tokenize and label the whole dataset; drop the raw text columns so the
# default collator only sees tensor-ready integer fields.
dataset = dataset.map(
    function=prepare_train_features,
    batched=True,
    remove_columns=['id', 'title', 'context', 'question', 'answers']
)

print(f"dataset['train'][0]:{dataset['train'][0]}")

train_loader = torch.utils.data.DataLoader(
    dataset=dataset['train'],
    batch_size=16,
    collate_fn=default_data_collator,
    shuffle=True,
    drop_last=True
)

# Peek at one collated batch to verify the tensor fields and shapes.
for data in train_loader:
    print(f"data:{data}")
    break


class ReadingComprehensionModel(torch.nn.Module):
    """DistilBERT extractive-QA model predicting answer start/end token positions.

    A pretrained DistilBERT encoder produces per-token 768-dim hidden states;
    a dropout + linear head maps each to two scores (start logit, end logit).
    The head is initialised from the ``qa_outputs`` layer of the pretrained
    question-answering checkpoint.
    """

    def __init__(self):
        super().__init__()
        self.pretrained = DistilBertModel.from_pretrained("distilbert-base-uncased", cache_dir="./MNIST/cache",
                                                          force_download=False)
        self.fc = torch.nn.Sequential(
            torch.nn.Dropout(0.1),
            torch.nn.Linear(768, 2)
        )

        # Initialise the linear head from the pretrained QA model's qa_outputs weights.
        pretrained_fc_parameter = AutoModelForQuestionAnswering.from_pretrained("distilbert-base-uncased",
                                                                                cache_dir="./MNIST/cache",
                                                                                force_download=False)
        print(f"pretrained_fc_parameter：{pretrained_fc_parameter}")
        self.fc[1].load_state_dict(pretrained_fc_parameter.qa_outputs.state_dict())

    def forward(self, input_ids, attention_mask, start_positions=None, end_positions=None):
        """Compute span logits and, when labels are given, the span loss.

        Args:
            input_ids: (batch, seq_len) token ids.
            attention_mask: (batch, seq_len) padding mask.
            start_positions: optional (batch,) gold start token indices.
            end_positions: optional (batch,) gold end token indices.

        Returns:
            dict with "loss" (None when labels are omitted), "start_logits"
            and "end_logits", each of shape (batch, seq_len).
        """
        logits = self.pretrained(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        logits = self.fc(logits)

        loss = None
        if start_positions is not None and end_positions is not None:
            max_length = logits.shape[1]
            # Positions of answers truncated out of this window are clamped to
            # max_length, which ignore_index then excludes from the loss.
            start_positions = start_positions.clamp(0, max_length)
            end_positions = end_positions.clamp(0, max_length)
            loss_fn = torch.nn.CrossEntropyLoss(ignore_index=max_length)
            # CrossEntropyLoss treats the (batch, seq_len, 2) logits as
            # (N, C=seq_len, d=2) against an (N, 2) target of (start, end)
            # indices — equivalent to averaging separate start and end losses.
            positions = torch.cat((start_positions.reshape(-1, 1), end_positions.reshape(-1, 1)), dim=1)
            loss = loss_fn(logits, positions)
        return {"loss": loss, "start_logits": logits[:, :, 0], "end_logits": logits[:, :, 1]}


# Build the model (downloads/caches pretrained weights) and smoke-test the
# forward pass on a single training batch before committing to training.
model = ReadingComprehensionModel()
for data in train_loader:
    out = model(**data)
    print(f"out:{out}")
    break
device = dltools.try_gpu()


def _test(model):
    """Quick evaluation pass over the sampled validation split.

    Accumulates the mean absolute distance (in tokens) between predicted and
    labelled start/end positions, printing the running averages after ~100
    batches, then decodes a few predicted vs. labelled answer spans from the
    last batch for eyeballing.
    """
    model.eval()
    model.to(device)
    validation_loader = torch.utils.data.DataLoader(
        dataset=dataset['validation'],
        batch_size=8,
        collate_fn=default_data_collator,
        shuffle=False,
        drop_last=False
    )

    start_offset, end_offset, total = 0, 0, 0
    for i, data in enumerate(validation_loader):
        with torch.no_grad():
            input_ids = data["input_ids"].to(device)
            attention_mask = data["attention_mask"].to(device)
            start_positions = data["start_positions"].to(device)
            end_positions = data["end_positions"].to(device)
            out = model(input_ids, attention_mask, start_positions, end_positions)
        # Predicted span ends = argmax over the sequence dimension.
        start_logits = out["start_logits"].argmax(dim=1)
        end_logits = out["end_logits"].argmax(dim=1)
        start_offset += (start_logits - start_positions).abs().sum()
        end_offset += (end_logits - end_positions).abs().sum()
        total += len(start_logits)
        if i % 10 == 0:
            print(i)

        if i < 100:
            continue
        print(f"avg start_offset:{start_offset / total} ")
        print(f"avg end_offset:{end_offset / total} ")
        # Decode a few examples from this (final) batch.
        for j in range(4):
            input_ids = data["input_ids"][j].to(device)
            pred_answer = input_ids[start_logits[j]:end_logits[j] + 1]
            label_answer = input_ids[start_positions[j]:end_positions[j] + 1]
            print(f"input_ids:{tokenizer.decode(input_ids)}")
            print(f"pred_answer:{tokenizer.decode(pred_answer)}")
            print(f"label_answer:{tokenizer.decode(label_answer)}")
        break


# Baseline evaluation before fine-tuning, for comparison with the post-training run.
_test(model)



def train(model):
    """Fine-tune the model for one pass over ``train_loader``.

    Uses AdamW (lr=2e-5) with a linear learning-rate schedule (10 warmup
    steps over one epoch) and gradient clipping at max-norm 1.0. Prints the
    per-step loss, learning rate, and mean absolute start/end position error.
    """
    optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
    scheduler = get_scheduler(name="linear",
                              num_warmup_steps=10,
                              num_training_steps=len(train_loader),
                              optimizer=optimizer
                              )
    model = model.to(device)
    model.train()
    for i, data in enumerate(train_loader):
        input_ids, attention_mask = data["input_ids"].to(device), data["attention_mask"].to(device)
        start_positions, end_positions = data["start_positions"].to(device), data["end_positions"].to(device)
        # optimizer.zero_grad() clears the grads of every model parameter
        # (the optimizer was built from model.parameters()), so the former
        # extra model.zero_grad() call was redundant and has been removed.
        optimizer.zero_grad()
        out = model(input_ids, attention_mask, start_positions, end_positions)
        loss = out["loss"]
        loss.backward()
        # Clip gradients to stabilise fine-tuning.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        scheduler.step()
        lr = optimizer.state_dict()["param_groups"][0]["lr"]
        # Mean absolute token distance between predicted and labelled positions.
        start_offset = (out["start_logits"].argmax(dim=1) - start_positions).abs().sum()/len(start_positions)
        end_offset = (out["end_logits"].argmax(dim=1) - end_positions).abs().sum()/len(end_positions)
        print(f"{i}: loss:{loss} lr:{lr} start_offset:{start_offset} end_offset:{end_offset}")

# Fine-tune, then re-evaluate to measure the improvement over the baseline.
train(model)
_test(model)