import os

# BUG FIX: HF_ENDPOINT must be set BEFORE importing `evaluate`, because
# huggingface_hub reads this environment variable at import time.  In the
# original the assignment came after the import and therefore had no effect.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"

import torch
import evaluate
from evaluate.visualization import radar_plot  # currently only radar plots are supported
from matplotlib import pyplot as plt

# Guard the CUDA calls so the demo also runs on CPU-only machines
# (the original crashed with "No CUDA GPUs are available" without a GPU).
if torch.cuda.is_available():
	torch.cuda.empty_cache()
	torch.cuda.set_device(0)

# Names of every evaluation module that `evaluate` knows about.
available_modules = evaluate.list_evaluation_modules()
print(available_modules)
print("---------------------------------------1------------------------------------------")
# Detailed listing of the official "comparison"-type evaluation modules,
# filtered via the parameters below.
comparison_modules = evaluate.list_evaluation_modules(
	module_type="comparison",  # module category to list
	include_community=False,   # skip community-contributed modules
	with_details=True,         # include full metadata for each module
)
print(comparison_modules)
print("---------------------------------------2------------------------------------------")
# Load an evaluation function: resolved from the local cache first,
# then downloaded from the Hugging Face Hub if necessary.
accuracy = evaluate.load("accuracy")
print(accuracy)  # show the module's summary/usage
print("---------------------------------------3------------------------------------------")
# Inspect the module's documentation: general description, then
# the description of the expected inputs.
for doc_text in (accuracy.description, accuracy.inputs_description):
	print(doc_text)
print("---------------------------------------4------------------------------------------")
# Metric computation — all samples at once.
accuracy = evaluate.load("accuracy")
gold = [0, 1, 2, 0, 1, 2]
guessed = [0, 1, 1, 2, 1, 0]
results1 = accuracy.compute(references=gold, predictions=guessed)
print(results1)
print("---------------------------------------5------------------------------------------")
# Metric computation — accumulate one sample at a time.
accuracy = evaluate.load("accuracy")
gold_labels = [0, 1, 0, 1]
predicted_labels = [1, 0, 0, 1]
for gold, predicted in zip(gold_labels, predicted_labels):
	accuracy.add(references=gold, predictions=predicted)
results2 = accuracy.compute()
print(results2)
print("----------------------------------------------------------------------------------")
# Metric computation — accumulate a batch at a time.
accuracy = evaluate.load("accuracy")
reference_batches = [[0, 1], [0, 1]]
prediction_batches = [[1, 0], [0, 1]]
for ref_batch, pred_batch in zip(reference_batches, prediction_batches):
	accuracy.add_batch(references=ref_batch, predictions=pred_batch)
results3 = accuracy.compute()
print(results3)
print("---------------------------------------6--------------------------------------------")
# Compute several metrics in one call.
clf_metrics = evaluate.combine(["accuracy", "f1", "recall", "precision"])
print(clf_metrics)
# Evaluate accuracy, f1, recall and precision together
# from the given predictions and references (ground truth).
results4 = clf_metrics.compute(predictions=[0, 1, 0], references=[0, 1, 1])
print(results4)
print("---------------------------------------7------------------------------------------")
# Visual comparison of evaluation results across models (radar chart).
model_scores = [
	{"accuracy": 0.99, "precision": 0.8, "f1": 0.95, "latency_in_seconds": 33.6},
	{"accuracy": 0.98, "precision": 0.87, "f1": 0.91, "latency_in_seconds": 11.2},
	{"accuracy": 0.98, "precision": 0.78, "f1": 0.88, "latency_in_seconds": 87.6},
	{"accuracy": 0.88, "precision": 0.78, "f1": 0.81, "latency_in_seconds": 101.6},
]
labels = ["Model 1", "Model 2", "Model 3", "Model 4"]
radar_figure = radar_plot(data=model_scores, model_names=labels)
# Render the chart window.
plt.show()
print("--------------------------------------8-------------------------------------------")