import os
import torch
from datasets import DatasetDict
from transformers import (AutoTokenizer, AutoModelForQuestionAnswering, TrainingArguments,
                          Trainer, DefaultDataCollator, pipeline)
# Free cached GPU memory and pin work to GPU 0 — but only when CUDA is
# actually available. The unguarded calls crash on CPU-only machines
# (torch.cuda.set_device raises when torch has no usable CUDA backend).
if torch.cuda.is_available():
	torch.cuda.empty_cache()
	torch.cuda.set_device(0)

# Dataset loading.
# With network access the dataset can be fetched directly:
# datasets = load_dataset("cmrc2018", cache_dir="data")
# Offline fallback: load a previously saved copy from local disk.
datasets = DatasetDict.load_from_disk("mrc_data")
print(datasets)
print(datasets["train"][1])
print("-----------------------------------------1---------------------------------------------------")
# Data preprocessing.
tokenizer = AutoTokenizer.from_pretrained("hfl/chinese-macbert-base")
print(tokenizer)
# Sanity-check the character-span -> token-span mapping on a 10-example
# sample before applying it to the whole dataset (same logic as process_func).
sample_dataset = datasets["train"].select(range(10))
tokenized_examples = tokenizer(text = sample_dataset["question"],
	text_pair = sample_dataset["context"],
	return_offsets_mapping = True,
	max_length = 256, truncation = "only_second", padding = "max_length")
print(tokenized_examples.keys())
print(tokenized_examples["offset_mapping"][0], len(tokenized_examples["offset_mapping"][0]))
offset_mapping = tokenized_examples.pop("offset_mapping")
for idx, offset in enumerate(offset_mapping):
	# assumes SQuAD-style answers: {"answer_start": [int, ...], "text": [str, ...]}
	# with at least one entry — TODO confirm no unanswerable samples in this data
	answer = sample_dataset[idx]["answers"]
	start_char = answer["answer_start"][0]
	end_char = start_char + len(answer["text"][0])
	# Locate the answer's start/end positions at the token level.
	# Strategy: find the start and end of the context segment, then close in
	# on the answer from both sides.

	# sequence id 1 marks context tokens; the following None marks the
	# trailing special/padding tokens.
	context_start = tokenized_examples.sequence_ids(idx).index(1)
	context_end = tokenized_examples.sequence_ids(idx).index(None, context_start) - 1

	# Check whether the answer is inside the (possibly truncated) context;
	# if not, point both positions at index 0 (CLS).
	if offset[context_end][1] < start_char or offset[context_start][0] > end_char:
		start_token_pos = 0
		end_token_pos = 0
	else:
		token_id = context_start
		while token_id <= context_end and offset[token_id][0] < start_char:
			token_id += 1
		start_token_pos = token_id
		token_id = context_end
		while token_id >= context_start and offset[token_id][1] > end_char:
			token_id -= 1
		end_token_pos = token_id

	print(answer, start_char, end_char, context_start, context_end, start_token_pos, end_token_pos)
	print("token answer decode:",
		tokenizer.decode(tokenized_examples["input_ids"][idx][start_token_pos: end_token_pos + 1]))


def process_func(examples):
	"""Tokenize question/context pairs and map character-level answer spans
	to token-level start/end positions for extractive QA training.

	Args:
		examples: batched dict with "question", "context" and "answers"
			columns (SQuAD-style answers: "answer_start" / "text" lists).

	Returns:
		The tokenized batch with added "start_positions" / "end_positions".
	"""
	tokenized_examples = tokenizer(text = examples["question"],
		text_pair = examples["context"],
		return_offsets_mapping = True,
		max_length = 384, truncation = "only_second", padding = "max_length")
	offset_mapping = tokenized_examples.pop("offset_mapping")
	start_positions = []
	end_positions = []
	for idx, offset in enumerate(offset_mapping):
		answer = examples["answers"][idx]
		# Robustness fix: an unanswerable sample (empty answer list) used to
		# raise IndexError on answer["answer_start"][0]; map it to the CLS
		# position (0, 0) instead, the standard convention for "no answer".
		if len(answer["answer_start"]) == 0:
			start_positions.append(0)
			end_positions.append(0)
			continue
		start_char = answer["answer_start"][0]
		end_char = start_char + len(answer["text"][0])
		# Locate the answer's start/end at the token level.
		# Strategy: find the start/end of the context segment (sequence id 1;
		# the following None marks trailing special/padding tokens), then
		# close in on the answer from both sides.
		context_start = tokenized_examples.sequence_ids(idx).index(1)
		context_end = tokenized_examples.sequence_ids(idx).index(None, context_start) - 1
		# If the answer fell outside the (truncated) context, use CLS (0, 0).
		if offset[context_end][1] < start_char or offset[context_start][0] > end_char:
			start_token_pos = 0
			end_token_pos = 0
		else:
			token_id = context_start
			while token_id <= context_end and offset[token_id][0] < start_char:
				token_id += 1
			start_token_pos = token_id
			token_id = context_end
			while token_id >= context_start and offset[token_id][1] > end_char:
				token_id -= 1
			end_token_pos = token_id
		start_positions.append(start_token_pos)
		end_positions.append(end_token_pos)

	tokenized_examples["start_positions"] = start_positions
	tokenized_examples["end_positions"] = end_positions
	return tokenized_examples


# Apply the preprocessing to every split; drop the raw text columns so only
# model inputs remain.
tokenied_datasets = datasets.map(process_func, batched = True, remove_columns = datasets["train"].column_names)
print(tokenied_datasets)
print("-----------------------------------------2---------------------------------------------------")
# Load the pretrained encoder with a question-answering head.
model = AutoModelForQuestionAnswering.from_pretrained("hfl/chinese-macbert-base")
print(model)


# Make sure every model weight tensor lives in contiguous memory.
def ensure_weights_contiguous(model):
	"""Rewrite any non-contiguous parameter of *model* as a contiguous copy,
	logging each parameter that had to be converted."""
	needs_fix = ((n, p) for n, p in model.named_parameters() if not p.is_contiguous())
	for name, param in needs_fix:
		print(f"Making {name} contiguous.")
		param.data = param.data.contiguous()


ensure_weights_contiguous(model)
print("-----------------------------------------3---------------------------------------------------")
# Configure TrainingArguments.
args = TrainingArguments(
	output_dir = "models_for_qa",
	per_device_train_batch_size = 64,
	per_device_eval_batch_size = 128,
	gradient_accumulation_steps = 8,  # gradient accumulation steps
	gradient_checkpointing = True,  # enable gradient checkpointing to save memory
	gradient_checkpointing_kwargs = {"use_reentrant": False},  # recommended setting
	optim = "adafactor",  # use the Adafactor optimizer
	fp16 = True,  # train in 16-bit floating point
	eval_strategy = "epoch",
	save_strategy = "epoch",
	logging_steps = 50,
	num_train_epochs = 1
)
print(args)
print("-----------------------------------------4---------------------------------------------------")
# Configure the Trainer.
trainer = Trainer(
	model = model,
	args = args,
	tokenizer = tokenizer,
	train_dataset = tokenied_datasets["train"],
	eval_dataset = tokenied_datasets["validation"],
	data_collator = DefaultDataCollator()
)
print(trainer)
print("-----------------------------------------5---------------------------------------------------")
# Model training.
trainer.train()
print("-----------------------------------------6---------------------------------------------------")
# Model inference: wrap the fine-tuned model in a QA pipeline on GPU 0.
pipe = pipeline("question-answering", model = model, tokenizer = tokenizer, device = 0)
# BUG FIX: a QA pipeline call returns a dict; the original code accessed a
# nonexistent `.print` attribute on it, raising AttributeError. Print the
# result dict instead.
print(pipe(question = "小明在哪里上班？", context = "小明在北京上班。"))
print(pipe)
print("-----------------------------------------7---------------------------------------------------")