{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['haohaidong' 'son' 'work']\n",
      "[0.66666667 0.33333333 0.66666667]\n",
      "提取的关键词: ['work', 'haohaidong', 'son']\n"
     ]
    }
   ],
   "source": [
    "# Method 1: keyword extraction via TF-IDF scores (English) #\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "\n",
    "def extract_keywords(query, top_k=3):\n",
    "    \"\"\"Extract the top_k highest-scoring TF-IDF terms from a single English query.\n",
    "\n",
    "    NOTE: with a one-document corpus the IDF factor is constant, so the\n",
    "    ranking is effectively by term frequency alone.\n",
    "    \"\"\"\n",
    "    # stop_words='english' drops common English stop words ('the', 'is', ...).\n",
    "    vectorizer = TfidfVectorizer(stop_words='english')\n",
    "\n",
    "    # fit_transform expects an iterable of documents, hence the one-element list.\n",
    "    X = vectorizer.fit_transform([query])\n",
    "\n",
    "    # Vocabulary terms, aligned with the columns of X.\n",
    "    feature_names = vectorizer.get_feature_names_out()\n",
    "    print(feature_names)\n",
    "\n",
    "    # Flatten the 1 x vocab sparse matrix into a 1-D score vector.\n",
    "    tfidf_scores = X.toarray().flatten()\n",
    "    print(tfidf_scores)\n",
    "\n",
    "    # Indices of the top_k scores, highest first.\n",
    "    keywords = [feature_names[i] for i in tfidf_scores.argsort()[-top_k:][::-1]]\n",
    "    return keywords\n",
    "\n",
    "query = \"what is Haohaidong work for, is Haohaidong my son for work?\"\n",
    "print(f\"提取的关键词: {extract_keywords(query)}\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['什么' '工作' '是不是' '海东']\n",
      "[0.37796447 0.75592895 0.37796447 0.37796447]\n",
      "提取的关键词: ['工作', '海东', '是不是']\n"
     ]
    }
   ],
   "source": [
    "# Method 2: keyword extraction via TF-IDF scores (Chinese) #\n",
    "import jieba\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "\n",
    "def extract_keywords(query, top_k=3):\n",
    "    \"\"\"Extract the top_k highest-scoring TF-IDF terms from a single Chinese query.\n",
    "\n",
    "    NOTE: this redefines extract_keywords from the previous cell (the later\n",
    "    definition shadows the earlier one when both cells are run). With a\n",
    "    one-document corpus the ranking is effectively by term frequency.\n",
    "    \"\"\"\n",
    "    # Chinese has no whitespace word boundaries: segment with jieba and\n",
    "    # re-join with spaces so TfidfVectorizer's default tokenizer works.\n",
    "    query_cut = ' '.join(jieba.cut(query))\n",
    "\n",
    "    vectorizer = TfidfVectorizer()\n",
    "\n",
    "    # fit_transform expects an iterable of documents, hence the one-element list.\n",
    "    X = vectorizer.fit_transform([query_cut])\n",
    "\n",
    "    # Vocabulary terms, aligned with the columns of X.\n",
    "    feature_names = vectorizer.get_feature_names_out()\n",
    "    print(feature_names)\n",
    "\n",
    "    # Flatten the 1 x vocab sparse matrix into a 1-D score vector.\n",
    "    tfidf_scores = X.toarray().flatten()\n",
    "    print(tfidf_scores)\n",
    "\n",
    "    # Indices of the top_k scores, highest first.\n",
    "    keywords = [feature_names[i] for i in tfidf_scores.argsort()[-top_k:][::-1]]\n",
    "\n",
    "    return keywords\n",
    "\n",
    "query = \"赫海东是做什么工作的？是不是没工作？\"\n",
    "print(f\"提取的关键词: {extract_keywords(query)}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 91,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['一规三则', '审查', '谁']\n"
     ]
    }
   ],
   "source": [
    "# Method 3: key-term extraction with an LLM #\n",
    "import os\n",
    "from openai import OpenAI\n",
    "\n",
    "# Endpoint and credentials are read from the environment so that no API key\n",
    "# is ever committed to the notebook; the defaults target a local Ollama\n",
    "# server, matching the original configuration.\n",
    "client = OpenAI(\n",
    "    api_key=os.getenv(\"LLM_API_KEY\", \"ollama\"),\n",
    "    base_url=os.getenv(\"LLM_BASE_URL\", \"http://192.168.20.43:11434/v1\"),\n",
    ")\n",
    "\n",
    "def llm_extract_keywords(query):\n",
    "    \"\"\"Ask the LLM to drop stop words and extract the query's key terms.\n",
    "\n",
    "    Returns the raw completion text; the prompt asks the model to format it\n",
    "    as an array-like string, e.g. ['猫','鱼','动物种类'] — the model is not\n",
    "    guaranteed to comply, so callers should treat the result as free text.\n",
    "    \"\"\"\n",
    "    # No system instructions needed; everything lives in the user message.\n",
    "    prompt = \"\"\n",
    "\n",
    "    query_new = f\"\"\"\n",
    "    # BACKGROUND #\\\n",
    "    停用词（Stop Words）是指在自然语言处理中，那些过于常见但对理解文本意义不大的词汇。常见的停用词包括“的”、“是”、“在”、“和”等。这些词在文本中频繁出现，但对文本的主要内容和意图贡献较小。\\\n",
    "    ----------------\\\n",
    "    # OBJECTIVE #\\\n",
    "    I have the following context.\n",
    "    {query}\n",
    "    基于该query的信息，请忽略掉停用词并仅提取query中的术语、名词等关键词，然后以数组格式输出，每个关键词用单引号括起来，用逗号分隔，例如：['猫','鱼','动物种类']。\n",
    "    \"\"\"\n",
    "    completion = client.chat.completions.create(\n",
    "        model=\"qwen2-7b-instruct\",\n",
    "        messages=[{'role': 'system', 'content': prompt},\n",
    "                  {'role': 'user', 'content': query_new}],\n",
    "    )\n",
    "    return completion.choices[0].message.content\n",
    "\n",
    "\n",
    "# Test case\n",
    "query = \"一规三则是谁审查的？\"\n",
    "keywords = llm_extract_keywords(query)\n",
    "print(keywords)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
