from datasets import load_dataset
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
)
from sentence_transformers.losses import MultipleNegativesRankingLoss
from sentence_transformers.training_args import BatchSamplers
import logging
from torch.utils.data import random_split
from torch.utils.data.dataset import Dataset

# Configure logging at INFO level so sentence-transformers / transformers
# emit progress, evaluation and checkpoint messages during training.
logging.basicConfig(level=logging.INFO)

# Load the base embedding model from a local checkpoint directory.
# NOTE(review): the original comment states SentenceTransformer auto-detects
# the NPU device — confirm device placement on the target machine.
model = SentenceTransformer("/mnt/remote/model_weights/bge-large-zh-v1_5")

# Load the QA dataset from a local copy of financial-qa-10K (train split only).
# Columns used downstream: "question" and "answer".
ds = load_dataset("/mnt/remote/dataset/financial-qa-10K", split="train")

class CustomDataset(Dataset):
    """Minimal torch ``Dataset`` wrapper over an in-memory sequence.

    Items are handed back exactly as stored (here: ``(question, answer)``
    tuples); no transformation or copying is performed.
    """

    def __init__(self, data):
        # Hold a reference to the backing sequence as-is.
        self.data = data

    def __len__(self):
        # Dataset size is simply the length of the wrapped sequence.
        return len(self.data)

    def __getitem__(self, idx):
        # Delegate indexing straight to the underlying sequence.
        return self.data[idx]

# ---------------------------------------------------------------------------
# Train / eval split.
#
# Bug fixed here: the original code built a list of (question, answer)
# tuples, wrapped it in a torch Dataset and ran `random_split` — then threw
# that result away by overwriting `train_dataset`/`test_dataset` with
# deterministic head/tail `ds.select(...)` slices (so the split was neither
# shuffled nor the one that was computed). SentenceTransformerTrainer wants
# a Hugging Face `datasets.Dataset` anyway, so we split `ds` directly.
#
# Keep only the two columns the loss consumes, in (anchor, positive) order:
# the trainer maps dataset columns positionally onto the loss inputs, and
# MultipleNegativesRankingLoss treats column 1 as the anchor and column 2 as
# the positive. Any extra columns in the raw dataset would otherwise be fed
# to the loss as spurious additional inputs.
pair_ds = ds.select_columns(["question", "answer"])

# Shuffled, reproducible 90/10 split (seed pinned for repeatable runs).
splits = pair_ds.train_test_split(test_size=0.1, seed=42)
train_dataset = splits["train"]
test_dataset = splits["test"]

# In-batch-negatives contrastive loss: for each (question, answer) pair the
# answer is the positive and every other answer in the batch is a negative.
loss = MultipleNegativesRankingLoss(model)

# Training hyperparameters.
args = SentenceTransformerTrainingArguments(
    # Required parameter: where checkpoints and logs are written.
    output_dir="./out",
    # Optional training parameters:
    num_train_epochs=2,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    learning_rate=2e-5,
    warmup_ratio=0.1,  # linear warmup over the first 10% of steps
    fp16=False,  # Set to False if you get an error that your GPU can't run on FP16
    bf16=True,  # Set to True if you have a GPU that supports BF16
    batch_sampler=BatchSamplers.NO_DUPLICATES,  # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
    # Optional tracking/debugging parameters:
    eval_strategy="steps",
    eval_steps=100,
    save_strategy="steps",
    save_steps=100,
    save_total_limit=2,  # keep only the 2 most recent checkpoints on disk
    logging_steps=100,
)

# Build the trainer. The train/eval datasets' columns are mapped onto the
# loss inputs, so they must expose the columns the loss expects.
trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    eval_dataset=test_dataset,  # held-out split doubles as the validation set
    loss=loss
)

# Run fine-tuning (evaluates/saves every 100 steps per `args`).
trainer.train()