import logging
import random
import traceback
from datetime import datetime

from datasets import load_dataset

from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import (
    BinaryClassificationEvaluator,
    InformationRetrievalEvaluator,
    ParaphraseMiningEvaluator,
    SequentialEvaluator,
)
from sentence_transformers.losses import OnlineContrastiveLoss
from sentence_transformers.losses.ContrastiveLoss import SiameseDistanceMetric
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import BatchSamplers, SentenceTransformerTrainingArguments

# Log at INFO level so training progress and evaluation scores are visible.
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)


# Local checkpoint of the gte-Qwen2 1.5B instruct embedding model.
model_name = "gte_Qwen2-1.5B-instruct"
model_path = "/home/jovyan/pcj-backup/models/gte_Qwen2-1.5B-instruct/iic/gte_Qwen2-1.5B-instruct"
model = SentenceTransformer(model_path)

# Timestamped output directory so repeated runs never overwrite each other.
run_stamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
output_dir = f"output/training_ocl-{model_name}-{run_stamp}"

################### Load  dataset ##################
# Load query/response pairs with binary labels from a local JSON file.
dataset = load_dataset("json", data_files="train3_te.json")
# load_dataset returns a DatasetDict; take the single "train" split.
dataset = dataset['train']
# Hold out 2000 examples for evaluation. A fixed seed makes the split
# reproducible across runs (previously the split was nondeterministic,
# so eval numbers were not comparable between runs).
dataset = dataset.train_test_split(test_size=2000, seed=12)
train_dataset = dataset["train"]
eval_dataset = dataset["test"]

# Negative pairs should have a distance of at least 0.5
margin = 0.5
# As distance metric, we use cosine distance (cosine_distance = 1 - cosine_similarity)
distance_metric = SiameseDistanceMetric.COSINE_DISTANCE
# OnlineContrastiveLoss only uses the hard positives/negatives in each batch,
# which typically outperforms plain ContrastiveLoss on pair-labeled data.
train_loss = OnlineContrastiveLoss(model=model, distance_metric=distance_metric, margin=margin)

################### Development  Evaluators ##################
evaluators = []

###### Classification ######
# Measures how well embedding similarity separates positive (label=1)
# from negative (label=0) query/response pairs.
# NOTE: fixed typo in the evaluator name ("semanic" -> "semantic");
# the logged metric prefix changes accordingly.
binary_acc_evaluator = BinaryClassificationEvaluator(
    sentences1=eval_dataset["query"],
    sentences2=eval_dataset["response"],
    labels=eval_dataset["label"],
    name="semantic-search",
)
evaluators.append(binary_acc_evaluator)

# Create a SequentialEvaluator. It runs every evaluator in `evaluators` in
# order (currently just the one above) and optimizes the model with respect
# to the score from the last evaluator (scores[-1]).
seq_evaluator = SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])

# Baseline evaluation before any fine-tuning.
logging.info("Evaluate model without training")
seq_evaluator(model, epoch=0, steps=0)


# Configure the training run.
# Hyperparameters: 1 epoch, batch size 64 (margin 0.5 was set on the loss above).
training_args = SentenceTransformerTrainingArguments(
    # Required parameter:
    output_dir=output_dir,
    # Core training hyperparameters:
    num_train_epochs=1,
    per_device_train_batch_size=64,
    per_device_eval_batch_size=64,
    warmup_ratio=0.1,
    # Mixed precision: FP16 on; switch to BF16 on GPUs that support it.
    fp16=True,
    bf16=False,
    # OCL benefits from no duplicate samples in a batch.
    batch_sampler=BatchSamplers.NO_DUPLICATES,
    # Evaluation / checkpointing / logging cadence:
    eval_strategy="steps",
    eval_steps=100,
    save_strategy="steps",
    save_steps=100,
    save_total_limit=3,
    logging_steps=100,
    run_name="online-contrastive-loss",  # shown in W&B if `wandb` is installed
)

# Build the trainer and run the fine-tuning loop.
trainer = SentenceTransformerTrainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    loss=train_loss,
    evaluator=seq_evaluator,
)
trainer.train()

# Persist the final fine-tuned model alongside the intermediate checkpoints.
model.save(f"{output_dir}/final")
