{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import os\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from utils.sample_utils import Sample"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ground_truth_file = \"data/manual/filtered_unsafe_Qwen_GT.json\"\n",
    "prompt = \"Safe4U\"\n",
    "model = \"Qwen2-7B-Instruct\"\n",
    "skip_hallucated = True\n",
    "result_file = f\"result/{model}/{prompt}/filtered_unsafe_result.json\"\n",
    "# Use context managers so the file handles are closed deterministically.\n",
    "with open(ground_truth_file) as f:\n",
    "    gt_list = json.load(f)\n",
    "with open(result_file) as f:\n",
    "    pred_list = json.load(f)\n",
    "sample_dict = dict()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Index ground-truth constraints and Sample wrappers by sample label.\n",
    "gt_dict = {}\n",
    "for sample in gt_list:\n",
    "    label = sample[\"sample_label\"]\n",
    "    gt_dict[label] = sample[\"constraints\"]\n",
    "    sample_dict[label] = Sample(sample)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Contract-level recall & precision"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Confusion-matrix counts: rows = ground-truth expectation, cols = predicted verdict.\n",
    "cnt = pd.DataFrame(np.zeros((2, 3)), columns=[\"No\", \"Yes\", \"unknown\"], index=[\"expect_No\", \"expect_Yes\"])\n",
    "unsound_cnt = 0\n",
    "hallucinated_cnt = 0\n",
    "for r in pred_list:\n",
    "    sample_label = r[\"sample_label\"]\n",
    "    # Only samples the model judged \"unsound\" carry per-contract responses to score.\n",
    "    if r[\"result\"] != \"unsound\":\n",
    "        continue\n",
    "    unsound_cnt += 1\n",
    "    if sample_label not in gt_dict:\n",
    "        print(f\"Sample {sample_label} not found in ground truth\")\n",
    "        continue\n",
    "    gt = gt_dict[sample_label]\n",
    "    # Responses are assumed to align positionally with the ground-truth constraints.\n",
    "    for c_idx, c in enumerate(r[\"response\"]):\n",
    "        expect = gt[c_idx]['expect']\n",
    "        if expect == \"Hallucinated\":\n",
    "            hallucinated_cnt += 1\n",
    "            if skip_hallucated:\n",
    "                continue\n",
    "            else:\n",
    "                # When not skipping, score hallucinated contracts as expected \"Yes\".\n",
    "                expect = \"Yes\"\n",
    "        cnt.loc[f\"expect_{expect}\", c[\"result\"]] += 1\n",
    "\n",
    "# Fold \"unknown\" predictions into the \"No\" column before computing the metrics.\n",
    "cnt.loc[\"expect_No\", \"No\"] += cnt.loc[\"expect_No\", \"unknown\"]\n",
    "cnt.loc[\"expect_Yes\", \"No\"] += cnt.loc[\"expect_Yes\", \"unknown\"]\n",
    "\n",
    "print(cnt)\n",
    "print(f\"Hallucination: {hallucinated_cnt}\")\n",
    "print(f\"Unsound: {unsound_cnt}\")\n",
    "print(f\"Recall: {cnt.loc['expect_No', 'No'] / (cnt.loc['expect_No', 'Yes'] + cnt.loc['expect_No', 'No'])}\")\n",
    "print(f\"Precision: {cnt.loc['expect_No', 'No'] / (cnt.loc['expect_No', 'No'] + cnt.loc['expect_Yes', 'No'])}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Function-level recall & precision\n",
    "- Recall: Localize unsound unsafe callees\n",
    "- Precision: The confidence of unsound prediction of unsafe callees."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def merge_result(now, new): \n",
    "    \"\"\"Merge two contract-level verdicts into one function-level verdict.\n",
    "\n",
    "    Precedence: \"No\" > \"unknown\" > \"Yes\" — a single \"No\" makes the whole\n",
    "    function \"No\", and \"Yes\" survives only if every merged verdict is \"Yes\".\n",
    "    \"\"\"\n",
    "    if now == \"No\":\n",
    "        return now\n",
    "    if now == \"Yes\":\n",
    "        return new\n",
    "    if new == \"No\":\n",
    "        return \"No\"\n",
    "    return \"unknown\"\n",
    "\n",
    "# Confusion-matrix counts at function (callee) granularity.\n",
    "cnt = pd.DataFrame(np.zeros((2, 3)), columns=[\"No\", \"Yes\", \"unknown\"], index=[\"expect_No\", \"expect_Yes\"])\n",
    "unsafe_call_num = 0\n",
    "unsound_cnt = 0\n",
    "for r in pred_list:\n",
    "    sample_label = r[\"sample_label\"]\n",
    "    # Only samples the model judged \"unsound\" carry per-contract responses to score.\n",
    "    if r[\"result\"] != \"unsound\":\n",
    "        continue\n",
    "    unsound_cnt += 1\n",
    "    if sample_label not in gt_dict:\n",
    "        print(f\"Sample {sample_label} not found in ground truth\")\n",
    "        continue\n",
    "    unsafe_call_num += len(sample_dict[sample_label].unsafe_callees)\n",
    "    con_gt = gt_dict[sample_label]\n",
    "    # Start every callee at \"Yes\"/\"Yes\", then fold in each of its contracts' verdicts.\n",
    "    unsafe_callees = dict()\n",
    "    for c_idx, c in enumerate(con_gt):\n",
    "        unsafe_callees[c[\"fn_name\"]] = {\"expect\": \"Yes\", \"pred\": \"Yes\"}\n",
    "    for c_idx, c in enumerate(con_gt):\n",
    "        expect = c[\"expect\"]\n",
    "        if expect == \"Hallucinated\":\n",
    "            if skip_hallucated:\n",
    "                continue\n",
    "            else:\n",
    "                # When not skipping, score hallucinated contracts as expected \"Yes\".\n",
    "                expect = \"Yes\"\n",
    "        # Predictions are assumed to align positionally with the ground-truth constraints.\n",
    "        unsafe_callees[c[\"fn_name\"]][\"expect\"] = merge_result(unsafe_callees[c[\"fn_name\"]][\"expect\"], expect)\n",
    "        unsafe_callees[c[\"fn_name\"]][\"pred\"] = merge_result(unsafe_callees[c[\"fn_name\"]][\"pred\"], r[\"response\"][c_idx][\"result\"])\n",
    "    for f in unsafe_callees.values():\n",
    "        cnt.loc[f\"expect_{f['expect']}\", f[\"pred\"]] += 1\n",
    "\n",
    "print(f\"Unsafe call number: {unsafe_call_num}\")\n",
    "print(cnt)\n",
    "print(f\"Unsound: {unsound_cnt}\")\n",
    "print(f\"Recall: {cnt.loc['expect_No', 'No'] / (cnt.loc['expect_No', 'Yes'] + cnt.loc['expect_No', 'No'])}\")\n",
    "print(f\"Precision: {cnt.loc['expect_No', 'No'] / (cnt.loc['expect_No', 'No'] + cnt.loc['expect_Yes', 'No'])}\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "unsafe_rust",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
