from datasets import Dataset, load_dataset
from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForSeq2Seq, TrainingArguments, Trainer
# import torch
# from peft import LoraConfig, TaskType, get_peft_model

# Paths: raw training corpus (JSON), base model checkpoint, and output directory.
data_dir = '/data/datasets/customs/wiki_data.json'
pretrain_model_dir = "/data/models/modelscope/modelscope/Llama-2-7b-ms"
save_dir = '/data/logs/Llama-2-7b-ms_lora_tuning_8bit_wiki_data'

# Load the wiki corpus from the local JSON file; only a 'train' split is produced.
datasets = load_dataset('json', data_files=data_dir, split='train')
print(datasets)

# faiss tutorial snippets (kept for reference)
# print(datasets.list_indexes())
# datasets.add_faiss_index(column="dengyunfei",batch_size=  1000)
# print(datasets.is_index_initialized('train'))

# Dataset selection and filtering: hold out a tiny fraction as a test split.
# NOTE(review): test_size=0.00001 rounds to zero test examples unless the
# corpus has on the order of 100k+ rows — confirm the dataset size.
datasets = datasets.train_test_split(test_size=0.00001)
# datasets["train"].select([0,1])
# ds = datasets["train"]
# # print(ds[:3])

# Tokenizer matching the base Llama-2 checkpoint.
tokenizer = AutoTokenizer.from_pretrained(pretrain_model_dir)
# tokenizer.padding_side = "right"  # padding_side must be set to "right"; otherwise training may fail to converge when batch size > 1
# tokenizer.pad_token_id = 2
#
#
# def process_func(example):
#     MAX_LENGTH = 512  # The Llama tokenizer splits a single Chinese character into multiple tokens, so allow a larger max length to keep the data intact
#     input_ids, attention_mask, labels = [], [], []
#     instruction = tokenizer(
#         "\n".join(["Human: " + example["instruction"], example["input"]]).strip() + "\n\nAssistant: ",
#         add_special_tokens=False)
#     response = tokenizer(example["output"], add_special_tokens=False)
#     input_ids = instruction["input_ids"] + response["input_ids"] + [tokenizer.eos_token_id]
#     attention_mask = instruction["attention_mask"] + response["attention_mask"] + [1]
#     labels = [-100] * len(instruction["input_ids"]) + response["input_ids"] + [tokenizer.eos_token_id]
#     if len(input_ids) > MAX_LENGTH:
#         input_ids = input_ids[:MAX_LENGTH]
#         attention_mask = attention_mask[:MAX_LENGTH]
#         labels = labels[:MAX_LENGTH]
#     return {
#         "input_ids": input_ids,
#         "attention_mask": attention_mask,
#         "labels": labels
#     }
#
#
# tokenized_ds = ds.map(process_func, remove_columns=ds.column_names)
#
# # In a multi-GPU setup, remove device_map="auto"; otherwise the model will be split across devices
# model = AutoModelForCausalLM.from_pretrained(pretrain_model_dir, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16, device_map="auto",load_in_8bit=True)
#
# config = LoraConfig(task_type=TaskType.CAUSAL_LM, )
# model = get_peft_model(model, config)
# model.enable_input_require_grads()
# model = model.half()  # When the whole model is in half precision, adam_epsilon must be increased
# model.print_trainable_parameters()
#
# args = TrainingArguments(
#     output_dir=save_dir,
#     per_device_train_batch_size=1,
#     gradient_accumulation_steps=8,
#     logging_steps=20,
#     num_train_epochs=100,
#     # 5. If the LoRA part is also set to half precision, adam_epsilon here must be set to a value greater than 5.96e-8, otherwise an error is raised
#     adam_epsilon=1e-4,
#     save_strategy="epoch",
#     gradient_checkpointing=True
# )
#
# trainer = Trainer(
#     model=model,
#     args=args,
#     tokenizer=tokenizer,
#     # train_dataset=tokenized_ds.select(range(6000)),
#     train_dataset=tokenized_ds,
#     data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True),
# )
#
# trainer.train()
