import matplotlib.pyplot as plt
import numpy as np
from datasets import load_dataset
from transformers import pipeline
from tqdm import tqdm

# # Use a pipeline as a high-level helper
# pipe = pipeline("text-classification", model="./chatgpt-detector-roberta-chinese")

# # load data
# dataset = load_dataset("./HC3-Chinese", "all")["train"]

# data_human_gpt2_xl = []
# data_model_gpt2_xl = []


# for data in tqdm(dataset):
#     human_text = data["human_answers"]
#     chatgpt_text = data["chatgpt_answers"]
#     result = pipe(human_text + chatgpt_text)
#     data_human_gpt2_xl.append(result[0]["score"])
#     data_model_gpt2_xl.append(result[1]["score"])
#     break


# Synthetic score distributions standing in for the (commented-out) detector
# output above. A fixed seed keeps the figure reproducible across runs.
np.random.seed(0)
data_human_gpt2_xl, data_model_gpt2_xl = (
    np.random.normal(mean, 0.02, 1000) for mean in (0.15, 0.2)
)


# Single-axes figure: overlaid histograms of detector scores for
# human-written vs model-written text.
fig, ax = plt.subplots(1, 1, figsize=(10, 8))

ax.hist(data_human_gpt2_xl, bins=30, alpha=0.5, label="Human")
ax.hist(data_model_gpt2_xl, bins=30, alpha=0.5, label="Model")
ax.set_title("chatgpt")

ax.set_xlabel("Log Likelihood Drop (Perturbation Discrepancy)")
ax.set_ylabel("Frequency")

# Axes-level legend: reuses the label= strings already attached to the
# hist() artists (no duplicated label list to keep in sync) and is placed
# inside the axes, unlike fig.legend(..., loc="upper right") which sits in
# figure coordinates and overlaps the plot.
ax.legend(loc="upper right")

# Tighten spacing and write the figure to disk.
fig.tight_layout()
fig.savefig("fig.png")
