{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "d:\\conda\\envs\\evaluate_env\\lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n",
      "Downloading builder script: 4.20kB [00:00, 4.24MB/s]                   \n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "\n",
    "# Point the Hugging Face hub client at a mirror endpoint.\n",
    "# NOTE: this must be set before importing `evaluate` so that the\n",
    "# builder-script download below goes through the mirror.\n",
    "os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'\n",
    "\n",
    "import evaluate\n",
    "\n",
    "# Load the accuracy metric (downloads its builder script on first use)\n",
    "accuracy = evaluate.load(\"accuracy\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "EvaluationModule(name: \"accuracy\", module_type: \"metric\", features: {'predictions': Value('int32'), 'references': Value('int32')}, usage: \"\"\"\n",
       "Args:\n",
       "    predictions (`list` of `int`): Predicted labels.\n",
       "    references (`list` of `int`): Ground truth labels.\n",
       "    normalize (`boolean`): If set to False, returns the number of correctly classified samples. Otherwise, returns the fraction of correctly classified samples. Defaults to True.\n",
       "    sample_weight (`list` of `float`): Sample weights Defaults to None.\n",
       "\n",
       "Returns:\n",
       "    accuracy (`float` or `int`): Accuracy score. Minimum possible value is 0. Maximum possible value is 1.0, or the number of examples input, if `normalize` is set to `True`.. A higher score means higher accuracy.\n",
       "\n",
       "Examples:\n",
       "\n",
       "    Example 1-A simple example\n",
       "        >>> accuracy_metric = evaluate.load(\"accuracy\")\n",
       "        >>> results = accuracy_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0])\n",
       "        >>> print(results)\n",
       "        {'accuracy': 0.5}\n",
       "\n",
       "    Example 2-The same as Example 1, except with `normalize` set to `False`.\n",
       "        >>> accuracy_metric = evaluate.load(\"accuracy\")\n",
       "        >>> results = accuracy_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0], normalize=False)\n",
       "        >>> print(results)\n",
       "        {'accuracy': 3.0}\n",
       "\n",
       "    Example 3-The same as Example 1, except with `sample_weight` set.\n",
       "        >>> accuracy_metric = evaluate.load(\"accuracy\")\n",
       "        >>> results = accuracy_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0], sample_weight=[0.5, 2, 0.7, 0.5, 9, 0.4])\n",
       "        >>> print(results)\n",
       "        {'accuracy': 0.8778625954198473}\n",
       "\"\"\", stored examples: 0)"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the accuracy module; its repr documents args, returns, and examples\n",
    "accuracy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "可用的评估指标：\n",
      "111\n",
      "['Vipitis/shadermatch', 'lvwerra/test', 'angelina-wang/directional_bias_amplification', 'cpllab/syntaxgym', 'lvwerra/bary_score', 'hack/test_metric', 'yzha/ctc_eval', 'codeparrot/apps_metric', 'mfumanelli/geometric_mean', 'daiyizheng/valid', 'erntkn/dice_coefficient', 'mgfrantz/roc_auc_macro', 'Vlasta/pr_auc', 'gorkaartola/metric_for_tp_fp_samples', 'idsedykh/metric', 'idsedykh/codebleu2', 'idsedykh/codebleu', 'idsedykh/megaglue', 'Vertaix/vendiscore', 'GMFTBY/dailydialogevaluate', 'GMFTBY/dailydialog_evaluate', 'jzm-mailchimp/joshs_second_test_metric', 'ola13/precision_at_k', 'yulong-me/yl_metric', 'abidlabs/mean_iou', 'abidlabs/mean_iou2', 'KevinSpaghetti/accuracyk', 'NimaBoscarino/weat', 'ronaldahmed/nwentfaithfulness', 'Viona/infolm', 'kyokote/my_metric2', 'kashif/mape', 'Ochiroo/rouge_mn', 'leslyarun/fbeta_score', 'anz2/iliauniiccocrevaluation', 'zbeloki/m2', 'xu1998hz/sescore', 'dvitel/codebleu', 'NCSOFT/harim_plus', 'JP-SystemsX/nDCG', 'sportlosos/sescore', 'Drunper/metrica_tesi', 'jpxkqx/peak_signal_to_noise_ratio', 'jpxkqx/signal_to_reconstruction_error', 'hpi-dhc/FairEval', 'lvwerra/accuracy_score', 'ybelkada/cocoevaluate', 'harshhpareek/bertscore', 'posicube/mean_reciprocal_rank', 'bstrai/classification_report', 'omidf/squad_precision_recall', 'Josh98/nl2bash_m', 'BucketHeadP65/confusion_matrix', 'BucketHeadP65/roc_curve', 'yonting/average_precision_score', 'transZ/test_parascore', 'transZ/sbert_cosine', 'hynky/sklearn_proxy', 'xu1998hz/sescore_english_mt', 'xu1998hz/sescore_german_mt', 'xu1998hz/sescore_english_coco', 'xu1998hz/sescore_english_webnlg', 'unnati/kendall_tau_distance', 'Viona/fuzzy_reordering', 'Viona/kendall_tau', 'lhy/hamming_loss', 'lhy/ranking_loss', 'Muennighoff/code_eval_octopack', 'yuyijiong/quad_match_score', 'AlhitawiMohammed22/CER_Hu-Evaluation-Metrics', 'Yeshwant123/mcc', 'phonemetransformers/segmentation_scores', 'sma2023/wil', 'chanelcolgate/average_precision', 'ckb/unigram', 'Felipehonorato/eer', 'manueldeprada/beer', 
'shunzh/apps_metric', 'He-Xingwei/sari_metric', 'langdonholmes/cohen_weighted_kappa', 'fschlatt/ner_eval', 'hyperml/balanced_accuracy', 'brian920128/doc_retrieve_metrics', 'guydav/restrictedpython_code_eval', 'k4black/codebleu', 'Natooz/ece', 'ingyu/klue_mrc', 'gabeorlanski/bc_eval', 'jjkim0807/code_eval', 'mtc/fragments', 'DarrenChensformer/eval_keyphrase', 'kdudzic/charmatch', 'Vallp/ter', 'DarrenChensformer/relation_extraction', 'Ikala-allen/relation_extraction', 'danieldux/hierarchical_softmax_loss', 'nlpln/tst', 'bdsaglam/jer', 'davebulaval/meaningbert', 'fnvls/bleu1234', 'fnvls/bleu_1234', 'nevikw39/specificity', 'yqsong/execution_accuracy', 'shalakasatheesh/squad_v2', 'arthurvqin/pr_auc', 'd-matrix/dmx_perplexity', 'akki2825/accents_unplugged_eval', 'juliakaczor/accents_unplugged_eval', 'Vickyage/accents_unplugged_eval', 'Qui-nn/accents_unplugged_eval', 'TelEl/accents_unplugged_eval', 'livvie/accents_unplugged_eval', 'DaliaCaRo/accents_unplugged_eval', 'alvinasvk/accents_unplugged_eval', 'LottieW/accents_unplugged_eval', 'sorgfresser/valid_efficiency_score', 'Fritz02/execution_accuracy', 'huanghuayu/multiclass_brier_score', 'jialinsong/apps_metric', 'DoctorSlimm/bangalore_score', 'agkphysics/ccc', 'DoctorSlimm/kaushiks_criteria', 'CZLC/rouge_raw', 'bascobasculino/mot-metrics', 'SEA-AI/mot-metrics', 'SEA-AI/det-metrics', 'saicharan2804/my_metric', 'red1bluelost/evaluate_genericify_cpp', 'maksymdolgikh/seqeval_with_fbeta', 'Bekhouche/NED', 'danieldux/isco_hierarchical_accuracy', 'ginic/phone_errors', 'berkatil/map', 'DarrenChensformer/action_generation', 'buelfhood/fbeta_score', 'danasone/ru_errant', 'helena-balabin/youden_index', 'SEA-AI/panoptic-quality', 'SEA-AI/box-metrics', 'MathewShen/bleu', 'berkatil/mrr', 'BridgeAI-Lab/SemF1', 'SEA-AI/horizon-metrics', 'maysonma/lingo_judge_metric', 'dannashao/span_metric', 'Aye10032/loss_metric', 'ag2435/my_metric', 'kilian-group/arxiv_score', 'bomjin/code_eval_octopack', 'svenwey/logmetric', 
'bowdbeg/matching_series', 'BridgeAI-Lab/Sem-nCG', 'bowdbeg/patch_series', 'venkatasg/gleu', 'kbmlcoding/apps_metric', 'jijihuny/ecqa', 'prajwall/mse', 'd-matrix/dmxMetric', 'dotkaio/competition_math', 'bowdbeg/docred', 'Remeris/rouge_ru', 'jarod0411/aucpr', 'Ruchin/jaccard_similarity', 'phucdev/blanc_score', 'NathanMad/bertscore-with-torch_dtype', 'cointegrated/blaser_2_0_qe', 'ahnyeonchan/Alignment-and-Uniformity', 'Baleegh/Fluency_Score', 'mdocekal/multi_label_precision_recall_accuracy_fscore', 'phucdev/vihsd', 'argmaxinc/detailed-wer', 'SEA-AI/user-friendly-metrics', 'hage2000/code_eval_stdio', 'hage2000/my_metric', 'Natooz/levenshtein', 'Khaliq88/execution_accuracy', 'pico-lm/perplexity', 'mtzig/cross_entropy_loss', 'kiracurrie22/precision', 'openpecha/bleurt', 'SEA-AI/ref-metrics', 'Natooz/mse', 'buelfhood/fbeta_score_2', 'murinj/hter', 'pico-lm/blimp', 'nobody4/waf_metric', 'mdocekal/precision_recall_fscore_accuracy', 'Glazkov/mars', 'Aye10032/top5_error_rate', 'nhop/L3Score', 'maryxm/code_eval', 'Usmankiani256/meteor', 'sign/signwriting_similarity', 'maqiuping59/table_markdown', 'aynetdia/semscore', 'Dnfs/awesome_metric', 'AIML-TUDA/VerifiableRewardsForScalableLogicalReasoning', 'LG-Anonym/VerifiableRewardsForScalableLogicalReasoning', 'Bekhouche/ACC', 'sunhill/clip_score', 'sunhill/spice', 'sunhill/cider', 'ncoop57/levenshtein_distance', 'kaleidophon/almost_stochastic_order', 'NeuraFusionAI/Arabic-Evaluation', 'lvwerra/element_count', 'prb977/cooccurrence_count', 'NimaBoscarino/pseudo_perplexity', 'ybelkada/toxicity', 'ronaldahmed/ccl_win', 'christopher/tokens_per_byte', 'lsy641/distinct', 'grepLeigh/perplexity', 'Charles95/element_count', 'Charles95/accuracy', 'Lucky28/honest']\n"
     ]
    }
   ],
   "source": [
    "# List every evaluation module currently available on the Hub.\n",
    "print(\"可用的评估指标：\")\n",
    "metrics = evaluate.list_evaluation_modules()\n",
    "# Show the count computed from the data instead of a hard-coded literal\n",
    "print(len(metrics))\n",
    "print(metrics)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[]"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Restrict the listing to canonical (non-community) evaluation modules\n",
    "official_metrics = evaluate.list_evaluation_modules(include_community=False)\n",
    "official_metrics"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<module 'evaluate' from 'd:\\\\conda\\\\envs\\\\evaluate_env\\\\lib\\\\site-packages\\\\evaluate\\\\__init__.py'>"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Show the imported `evaluate` module object (repr reveals its install path)\n",
    "evaluate"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Downloading builder script: 6.79kB [00:00, 6.82MB/s]                   \n"
     ]
    }
   ],
   "source": [
    "# Load the F1 metric (downloads its builder script on first use)\n",
    "f1 = evaluate.load(\"f1\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "EvaluationModule(name: \"f1\", module_type: \"metric\", features: {'predictions': Value('int32'), 'references': Value('int32')}, usage: \"\"\"\n",
       "Args:\n",
       "    predictions (`list` of `int`): Predicted labels.\n",
       "    references (`list` of `int`): Ground truth labels.\n",
       "    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n",
       "    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n",
       "    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n",
       "\n",
       "        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n",
       "        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n",
       "        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n",
       "        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n",
       "        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n",
       "    sample_weight (`list` of `float`): Sample weights Defaults to None.\n",
       "\n",
       "Returns:\n",
       "    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n",
       "\n",
       "Examples:\n",
       "\n",
       "    Example 1-A simple binary example\n",
       "        >>> f1_metric = evaluate.load(\"f1\")\n",
       "        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n",
       "        >>> print(results)\n",
       "        {'f1': 0.5}\n",
       "\n",
       "    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n",
       "        >>> f1_metric = evaluate.load(\"f1\")\n",
       "        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n",
       "        >>> print(round(results['f1'], 2))\n",
       "        0.67\n",
       "\n",
       "    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n",
       "        >>> f1_metric = evaluate.load(\"f1\")\n",
       "        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n",
       "        >>> print(round(results['f1'], 2))\n",
       "        0.35\n",
       "\n",
       "    Example 4-A multiclass example, with different values for the `average` input.\n",
       "        >>> predictions = [0, 2, 1, 0, 0, 1]\n",
       "        >>> references = [0, 1, 2, 0, 1, 2]\n",
       "        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n",
       "        >>> print(round(results['f1'], 2))\n",
       "        0.27\n",
       "        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n",
       "        >>> print(round(results['f1'], 2))\n",
       "        0.33\n",
       "        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n",
       "        >>> print(round(results['f1'], 2))\n",
       "        0.27\n",
       "        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n",
       "        >>> print(results)\n",
       "        {'f1': array([0.8, 0. , 0. ])}\n",
       "\n",
       "    Example 5-A multi-label example\n",
       "        >>> f1_metric = evaluate.load(\"f1\", \"multilabel\")\n",
       "        >>> results = f1_metric.compute(predictions=[[0, 1, 1], [1, 1, 0]], references=[[0, 1, 1], [0, 1, 0]], average=\"macro\")\n",
       "        >>> print(round(results['f1'], 2))\n",
       "        0.67\n",
       "\"\"\", stored examples: 0)"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the F1 module; its repr documents args (labels, pos_label, average),\n",
    "# returns, and usage examples\n",
    "f1"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "evaluate_env",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "37990cb7e14a4280d29cc47a919e2d55f3bfd74dc413cee212f628168b6511e1"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
