{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "c2ccafac-17d8-4ca3-8b52-b3e849ae1c2a",
   "metadata": {},
   "source": [
    "## Table Question Answering"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2b2d5994-242e-4240-a504-3f8607aba0c0",
   "metadata": {},
   "source": [
    "环境与依赖"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "15f7f9fa-599c-4442-9e58-921eafd852d4",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[WARNING] DEVICE(6166,e7fffd4ab020,python):2025-08-30-07:31:19.068.473 [mindspore/ccsrc/utils/dlopen_macro.h:165] DlsymAscend] Dynamically load symbol aclprofGetSupportedFeaturesV2 failed, result = /usr/local/Ascend/ascend-toolkit/latest/lib64/libmsprofiler.so: undefined symbol: aclprofGetSupportedFeaturesV2\n",
      "[WARNING] DEVICE(6166,e7fffd4ab020,python):2025-08-30-07:31:19.068.614 [mindspore/ccsrc/utils/dlopen_macro.h:165] DlsymAscend] Dynamically load symbol aclrtEventGetTimestamp failed, result = /usr/local/Ascend/ascend-toolkit/latest/lib64/libascendcl.so: undefined symbol: aclrtEventGetTimestamp\n",
      "/usr/local/miniconda3/lib/python3.9/site-packages/numpy/core/getlimits.py:499: UserWarning: The value of the smallest subnormal for <class 'numpy.float64'> type is zero.\n",
      "  setattr(self, word, getattr(machar, word).flat[0])\n",
      "/usr/local/miniconda3/lib/python3.9/site-packages/numpy/core/getlimits.py:89: UserWarning: The value of the smallest subnormal for <class 'numpy.float64'> type is zero.\n",
      "  return self._float_to_str(self.smallest_subnormal)\n",
      "/usr/local/miniconda3/lib/python3.9/site-packages/numpy/core/getlimits.py:499: UserWarning: The value of the smallest subnormal for <class 'numpy.float32'> type is zero.\n",
      "  setattr(self, word, getattr(machar, word).flat[0])\n",
      "/usr/local/miniconda3/lib/python3.9/site-packages/numpy/core/getlimits.py:89: UserWarning: The value of the smallest subnormal for <class 'numpy.float32'> type is zero.\n",
      "  return self._float_to_str(self.smallest_subnormal)\n",
      "[WARNING] ME(6166:255086652207136,MainProcess):2025-08-30-07:31:25.767.414 [mindspore/context.py:1402] For 'context.set_context', the parameter 'ascend_config' will be deprecated and removed in a future version. Please use the api mindspore.device_context.ascend.op_precision.precision_mode(),\n",
      "                                                       mindspore.device_context.ascend.op_precision.op_precision_mode(),\n",
      "                                                       mindspore.device_context.ascend.op_precision.matmul_allow_hf32(),\n",
      "                                                       mindspore.device_context.ascend.op_precision.conv_allow_hf32(),\n",
      "                                                       mindspore.device_context.ascend.op_tuning.op_compile() instead.\n",
      "Building prefix dict from the default dictionary ...\n",
      "Loading model from cache /tmp/jieba.cache\n",
      "Loading model cost 2.127 seconds.\n",
      "Prefix dict has been built successfully.\n"
     ]
    }
   ],
   "source": [
    "import re\n",
    "import pandas as pd\n",
    "from difflib import SequenceMatcher\n",
    "from mindspore import context\n",
    "from mindnlp.transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ec3fabfc-39c4-457e-992d-a15e596576e4",
   "metadata": {},
   "source": [
    "加载模型与 QA 管线"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "ff1e8244-0781-4077-b9d2-e7b6479cc2ec",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/HwHiAiUser/.local/lib/python3.9/site-packages/mindnlp/transformers/tokenization_utils_base.py:1526: FutureWarning: `clean_up_tokenization_spaces` was not set. It will be set to `True` by default. This behavior will be depracted, and will be then set to `False` by default. \n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "# Extractive QA checkpoint (DistilBERT distilled on SQuAD, per the model id);\n",
    "# small enough for interactive CPU/NPU use.\n",
    "MODEL = \"distilbert-base-uncased-distilled-squad\"\n",
    "tok = AutoTokenizer.from_pretrained(MODEL)\n",
    "mdl = AutoModelForQuestionAnswering.from_pretrained(MODEL)\n",
    "# max_answer_len=8 keeps answers short (table cells are short spans);\n",
    "# handle_impossible_answer lets a chunk with no answer return an empty span\n",
    "# instead of forcing a bad extraction.\n",
    "qa = pipeline(\n",
    "    \"question-answering\",\n",
    "    model=mdl,\n",
    "    tokenizer=tok,\n",
    "    max_answer_len=8,                 \n",
    "    handle_impossible_answer=True,\n",
    "    clean_up_tokenization_spaces=False\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "caa8df0e-94b3-4631-99ae-c58f38b006f4",
   "metadata": {},
   "source": [
    "通用工具函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "329902dd-1a0d-4fb0-8c1f-e301b44dce09",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Similarity ratio in [0, 1] between two strings, case-insensitive.\n",
    "# (Despite the name, this is difflib's SequenceMatcher ratio, not an n-gram metric.)\n",
    "def ngram_sim(a: str, b: str) -> float:\n",
    "    left, right = str(a).lower(), str(b).lower()\n",
    "    return SequenceMatcher(None, left, right).ratio()\n",
    "\n",
    "def is_numeric_series(s: pd.Series, threshold: float = 0.7) -> bool:\n",
    "    \"\"\"Return True when at least `threshold` of the values parse as numbers.\n",
    "\n",
    "    Thousands separators (commas) and underscores are stripped before parsing.\n",
    "    `threshold` generalizes the previously hard-coded 0.7 cut-off (default\n",
    "    unchanged, so existing callers behave identically). An empty series has a\n",
    "    NaN parse ratio and therefore returns False.\n",
    "    \"\"\"\n",
    "    cleaned = s.astype(str).str.replace(\",\", \"\").str.replace(\"_\", \"\")\n",
    "    return pd.to_numeric(cleaned, errors=\"coerce\").notna().mean() >= threshold\n",
    "\n",
    "# Coerce a string column to numbers: strip thousands separators and\n",
    "# underscores, expand trailing k/m/b magnitude suffixes into scientific\n",
    "# notation, then parse (unparseable values become NaN).\n",
    "def to_numeric(series: pd.Series) -> pd.Series:\n",
    "    cleaned = series.astype(str).str.replace(\",\", \"\").str.replace(\"_\", \"\")\n",
    "    for suffix, exponent in ((r\"(?i)\\s*k$\", \"e3\"), (r\"(?i)\\s*m$\", \"e6\"), (r\"(?i)\\s*b$\", \"e9\")):\n",
    "        cleaned = cleaned.str.replace(suffix, exponent, regex=True)\n",
    "    return pd.to_numeric(cleaned, errors=\"coerce\")\n",
    "\n",
    "# Pick the most identifier-like column: the non-numeric column with the\n",
    "# highest ratio of unique values. Falls back to the first column when every\n",
    "# column looks numeric.\n",
    "def pick_entity_col(df: pd.DataFrame):\n",
    "    chosen = df.columns[0]\n",
    "    top_ratio = -1.0\n",
    "    for name in df.columns:\n",
    "        if is_numeric_series(df[name]):\n",
    "            continue\n",
    "        uniq_ratio = df[name].nunique() / max(1, len(df))\n",
    "        if uniq_ratio > top_ratio:\n",
    "            top_ratio, chosen = uniq_ratio, name\n",
    "    return chosen\n",
    "\n",
    "# Among the numeric columns, return the one whose name is most similar to\n",
    "# the question text; None when the frame has no numeric columns.\n",
    "def best_numeric_col(question: str, df: pd.DataFrame):\n",
    "    numeric_cols = [c for c in df.columns if is_numeric_series(df[c])]\n",
    "    if not numeric_cols:\n",
    "        return None\n",
    "    return max(numeric_cols, key=lambda c: ngram_sim(question, c))\n",
    "\n",
    "# Distinguish measurement questions (\"how many stars does X have\") from\n",
    "# counting questions (\"how many repositories ...\"): when a numeric column\n",
    "# name appears in the question together with a possession verb, the asker\n",
    "# wants a cell value, so COUNT aggregation must be suppressed in favor of\n",
    "# extractive QA.\n",
    "def looks_like_measurement_question(question: str, df: pd.DataFrame) -> bool:\n",
    "    ql = question.lower()\n",
    "    # The possession test does not depend on the column, so check it once\n",
    "    # up front instead of inside the column loop.\n",
    "    possession = re.search(r\"\\b(has|have|does)\\b\", ql) or re.search(r\"(是多少|有多少)\", question)\n",
    "    if not possession:\n",
    "        return False\n",
    "    return any(\n",
    "        str(c).lower() in ql and is_numeric_series(df[c])\n",
    "        for c in df.columns\n",
    "    )"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c2f9da76-be6d-4351-a24b-1af4de89458a",
   "metadata": {},
   "source": [
    "表格序列化与分块"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "3fae4fae-31a8-4291-9d9f-310b0d494ea8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Serialize the table into natural-language lines (one row -> several\n",
    "# sentences) so an extractive QA model can locate cell values as text spans.\n",
    "def serialize_table(df: pd.DataFrame) -> str:\n",
    "    cols = list(df.columns)\n",
    "    ent_col = pick_entity_col(df)\n",
    "    # PERF: is_numeric_series scans a whole column, so decide numeric-ness\n",
    "    # once per column here instead of once per row * column inside the loop.\n",
    "    numeric_cols = {c for c in cols if is_numeric_series(df[c])}\n",
    "    lines = []\n",
    "    # Splits multi-valued cells: punctuation separators plus English/Chinese \"and\".\n",
    "    DELIM_RE = r\"[,/;|、]|(?:\\s+and\\s+)|(?:\\s+与\\s+)|(?:\\s+和\\s+)\"\n",
    "\n",
    "    for _, r in df.iterrows():\n",
    "        ent = str(r[ent_col])\n",
    "        # Base sentence listing every attribute of the row.\n",
    "        pieces = [f\"{c} is {r[c]}\" for c in cols]\n",
    "        lines.append(\". \".join(pieces) + f\". The {ent_col} {ent} has these attributes.\")\n",
    "\n",
    "        # Paraphrase sentences for every non-numeric attribute column.\n",
    "        for c in cols:\n",
    "            if c == ent_col or c in numeric_cols:\n",
    "                continue\n",
    "            cell = str(r[c]).strip()\n",
    "            if not cell:\n",
    "                continue\n",
    "\n",
    "            # Whole-cell paraphrases.\n",
    "            lines.append(f\"The {ent} has {c} {cell}.\")\n",
    "            lines.append(f\"{ent} includes {cell} in {c}.\")\n",
    "            lines.append(f\"{ent} uses {cell}.\")\n",
    "            lines.append(f\"{cell} is the {c} of {ent}.\")\n",
    "\n",
    "            # Per-value paraphrases when the cell holds a delimited list.\n",
    "            toks = [t.strip() for t in re.split(DELIM_RE, cell, flags=re.IGNORECASE) if t and t.strip()]\n",
    "            if len(toks) > 1:\n",
    "                for t in toks:\n",
    "                    # Skip bare numbers and overly long fragments.\n",
    "                    if re.fullmatch(r\"\\d+(\\.\\d+)?\", t) or len(t) > 40:\n",
    "                        continue\n",
    "                    lines.append(f\"The {ent} has {c} {t}.\")\n",
    "                    lines.append(f\"{ent} includes {t} in {c}.\")\n",
    "                    lines.append(f\"{ent} uses {t}.\")\n",
    "                    lines.append(f\"{t} is the {c} of {ent}.\")\n",
    "\n",
    "    return \"\\n\".join(lines)\n",
    "\n",
    "\n",
    "# Greedy line-packing: split serialized text into chunks of at most\n",
    "# max_chars characters, never cutting a line in half, so key table rows\n",
    "# are not truncated by the model's input limit.\n",
    "def chunk_by_lines(text: str, max_chars: int = 900):\n",
    "    chunks = []\n",
    "    current, used = [], 0\n",
    "    for line in text.splitlines():\n",
    "        if current and used + len(line) + 1 > max_chars:\n",
    "            chunks.append(\"\\n\".join(current))\n",
    "            current, used = [], 0\n",
    "        current.append(line)\n",
    "        used += len(line) + 1  # +1 accounts for the joining newline\n",
    "    if current:\n",
    "        chunks.append(\"\\n\".join(current))\n",
    "    return chunks\n",
    "\n",
    "# Run the QA pipeline over every chunk of the context and keep the\n",
    "# highest-scoring candidate (None when there are no chunks).\n",
    "def qa_best(question: str, context_text: str):\n",
    "    best = None\n",
    "    for chunk_id, ctx in enumerate(chunk_by_lines(context_text, max_chars=900)):\n",
    "        out = qa(question=question, context=ctx)\n",
    "        cand = out[0] if isinstance(out, list) else out\n",
    "        cand[\"_chunk_id\"] = chunk_id  # remember which chunk the answer came from\n",
    "        if best is None or float(cand.get(\"score\", 0.0)) > float(best.get(\"score\", 0.0)):\n",
    "            best = cand\n",
    "    return best"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4eb190a8-81c0-4c9e-b78a-17d453667d0f",
   "metadata": {},
   "source": [
    "触发词+尝试聚合"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "7d8a7642-02b9-4222-9bc2-5bd4210adef2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Regex triggers mapping question phrasing to an aggregation op.\n",
    "# *_EN patterns are matched against the lowercased question; *_ZH patterns\n",
    "# are matched against the original (Chinese) question text.\n",
    "TRIGGERS = {\n",
    "    \"SUM_EN\":   r\"\\b(total|sum)\\b\",\n",
    "    \"COUNT_EN\": r\"\\b(how many|count)\\b\",\n",
    "    \"MAX_EN\":   r\"\\b(max|maximum|largest|highest|most)\\b\",\n",
    "    \"MIN_EN\":   r\"\\b(min|minimum|smallest|lowest|least)\\b\",\n",
    "    \"SUM_ZH\":   r\"(合计|总计|总数)\",\n",
    "    \"COUNT_ZH\": r\"(多少|几(个|项|台|人|名)?)\",\n",
    "    \"MAX_ZH\":   r\"(最多|最大|最高)\",\n",
    "    \"MIN_ZH\":   r\"(最少|最小|最低)\",\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "9af6de04-0712-4faa-b319-99e6021d95cd",
   "metadata": {},
   "outputs": [],
   "source": [
    "def try_aggregate(question: str, df: pd.DataFrame):\n",
    "    \"\"\"Answer MAX/MIN/SUM/COUNT questions directly from the table.\n",
    "\n",
    "    Returns a result dict ({'mode': 'AGG', ...}) when an aggregation trigger\n",
    "    fires, or None so the caller falls back to extractive QA.\n",
    "    \"\"\"\n",
    "    q = question.lower()\n",
    "\n",
    "    # ARGMAX / ARGMIN: name the entity (row) holding the extreme value.\n",
    "    # MAX is tried before MIN, preserving the original trigger priority.\n",
    "    for op, en_key, zh_key in ((\"ARGMAX\", \"MAX_EN\", \"MAX_ZH\"), (\"ARGMIN\", \"MIN_EN\", \"MIN_ZH\")):\n",
    "        if re.search(TRIGGERS[en_key], q) or re.search(TRIGGERS[zh_key], question):\n",
    "            score_col = best_numeric_col(question, df) or df.columns[0]\n",
    "            vals = to_numeric(df[score_col]).dropna()\n",
    "            if vals.empty:\n",
    "                return None  # nothing comparable; fall back to extractive QA\n",
    "            idx = vals.idxmax() if op == \"ARGMAX\" else vals.idxmin()\n",
    "            ent_col = pick_entity_col(df)\n",
    "            # BUG FIX: idxmax/idxmin return index *labels*, so look up with\n",
    "            # .loc (the old .iloc[idx] broke on non-default indexes).\n",
    "            return {\"mode\": \"AGG\", \"op\": op, \"answer\": str(df.loc[idx, ent_col]),\n",
    "                    \"score_col\": score_col, \"entity_col\": ent_col}\n",
    "\n",
    "    # SUM over the numeric column most relevant to the question.\n",
    "    if re.search(TRIGGERS[\"SUM_EN\"], q) or re.search(TRIGGERS[\"SUM_ZH\"], question):\n",
    "        col = best_numeric_col(question, df) or df.columns[0]\n",
    "        vals = to_numeric(df[col]).dropna()\n",
    "        if not len(vals):\n",
    "            answer = \"0\"\n",
    "        else:\n",
    "            total = vals.sum()\n",
    "            # Keep integer rendering for integral totals; do not truncate floats.\n",
    "            answer = str(int(total)) if float(total).is_integer() else str(total)\n",
    "        return {\"mode\": \"AGG\", \"op\": \"SUM\", \"answer\": answer, \"column\": col}\n",
    "\n",
    "    # COUNT (measurement-style questions are handed back to extractive QA).\n",
    "    if re.search(TRIGGERS[\"COUNT_EN\"], q) or re.search(TRIGGERS[\"COUNT_ZH\"], question):\n",
    "        if looks_like_measurement_question(question, df):\n",
    "            return None\n",
    "        kw = None\n",
    "        m = re.search(r\"use\\s+([a-z0-9\\.\\-\\+\\s]+)\", q) or re.search(r\"(?:语言|language)\\s*(?:为|是|=)\\s*([a-z0-9\\.\\-\\+]+)\", question, flags=re.IGNORECASE)\n",
    "        if m:\n",
    "            kw = m.group(1).strip().lower()\n",
    "        if kw:\n",
    "            # Count rows whose joined cell text mentions the keyword.\n",
    "            cnt = sum(1 for _, row in df.iterrows()\n",
    "                      if kw in \" \".join(map(str, row.values)).lower())\n",
    "            return {\"mode\": \"AGG\", \"op\": \"COUNT_KW\", \"answer\": str(cnt), \"keyword\": kw}\n",
    "        return {\"mode\": \"AGG\", \"op\": \"COUNT_ALL\", \"answer\": str(len(df))}\n",
    "    return None"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "920219a5-f3a6-45ae-b3c1-91db9f4d4049",
   "metadata": {},
   "source": [
    "表格问答主入口"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "9b03f968-a323-4b0b-a464-5f1ceabe7ca6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Main entry point: aggregation questions (MAX/MIN/SUM/COUNT) are computed\n",
    "# directly from the table; everything else is answered by serializing the\n",
    "# table to text and running chunked extractive QA over it.\n",
    "def answer_table_question(question: str, df: pd.DataFrame):\n",
    "    agg = try_aggregate(question, df)\n",
    "    if agg is not None:\n",
    "        return agg\n",
    "    qa_out = qa_best(question, serialize_table(df)) or {}\n",
    "    if \"answer\" not in qa_out:\n",
    "        return {\"mode\":\"QA\", \"answer\": \"\"}\n",
    "    return {\"mode\":\"QA\", \"answer\": qa_out[\"answer\"], \"score\": qa_out.get(\"score\", 0.0)}"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "05d16633-342c-42ab-886a-5010567db67d",
   "metadata": {},
   "source": [
    "示例数据与问答"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "68e90408-5a23-40db-80a3-87cda06cd96e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Q: Which repository has the most stars?\n",
      "A: Transformers\n",
      "\n",
      "Q: How many stars does the Transformers repository have?\n",
      "A: 36,542\n",
      "\n",
      "Q: Which repository uses Rust?\n",
      "A: Tokenizers\n",
      "\n",
      "Q: What is the programming language of Datasets?\n",
      "A: Python\n",
      "\n",
      "Q: What is the total number of contributors?\n",
      "A: 762\n",
      "\n",
      "Q: How many repositories use Python?\n",
      "A: 3\n",
      "\n",
      "Q: 最少贡献者的是哪个仓库？\n",
      "A: Tokenizers\n",
      "\n",
      "Q: Stars 最多的是哪个仓库？\n",
      "A: Transformers\n"
     ]
    }
   ],
   "source": [
    "if __name__ == \"__main__\":\n",
    "    # Demo table: three repositories with numeric (Stars, Contributors) and\n",
    "    # text (Programming language) columns, including a multi-valued cell.\n",
    "    table_df = pd.DataFrame({\n",
    "        \"Repository\": [\"Transformers\", \"Datasets\", \"Tokenizers\"],\n",
    "        \"Stars\": [\"36,542\", \"4,512\", \"3,934\"],\n",
    "        \"Contributors\": [\"651\", \"77\", \"34\"],\n",
    "        \"Programming language\": [\"Python\", \"Python\", \"Rust, Python and NodeJS\"]\n",
    "    })\n",
    "\n",
    "    questions = [\n",
    "        \"Which repository has the most stars?\",\n",
    "        \"How many stars does the Transformers repository have?\",\n",
    "        \"Which repository uses Rust?\",\n",
    "        \"What is the programming language of Datasets?\",\n",
    "        \"What is the total number of contributors?\",\n",
    "        \"How many repositories use Python?\",\n",
    "        \"最少贡献者的是哪个仓库？\",\n",
    "        \"Stars 最多的是哪个仓库？\",\n",
    "    ]\n",
    "\n",
    "    for q in questions:\n",
    "        ans = answer_table_question(q, table_df)\n",
    "        # Only the answer text is printed; the result dict also carries\n",
    "        # diagnostic fields (op/score/column) useful for debugging.\n",
    "        # (Removed the dead `extra` dict that was computed but never used.)\n",
    "        print(f\"\\nQ: {q}\\nA: {ans['answer']}\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
