{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "ename": "ModuleNotFoundError",
     "evalue": "No module named 'jieba'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mModuleNotFoundError\u001b[0m                       Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[1], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mjieba\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m cut_for_search\n\u001b[0;32m      2\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mmath\u001b[39;00m\n\u001b[0;32m      3\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mre\u001b[39;00m\n",
      "\u001b[1;31mModuleNotFoundError\u001b[0m: No module named 'jieba'"
     ]
    }
   ],
   "source": [
    "from jieba import cut_for_search\n",
    "import math\n",
    "import re\n",
    "from Search.getdata import word_s, word_s_title, TF, IDF, TF_Title, IDF_Title, information, page_rank\n",
    "import time\n",
    "\n",
    "def calculate_tf(word_list, document_content):\n",
    "    term_frequency = dict.fromkeys(word_list, 0)\n",
    "    for word in document_content:\n",
    "        if word in word_list:\n",
    "            term_frequency[word] += 1\n",
    "    for word, count in term_frequency.items():\n",
    "        term_frequency[word] = math.log10(count + 1)\n",
    "    return term_frequency\n",
    "\n",
    "def calculate_tfidf(tf_dict, idf_dict):\n",
    "    tfidf_scores = {}\n",
    "    for word, count in tf_dict.items():\n",
    "        tfidf_scores[word] = count * idf_dict.get(word, 0) \n",
    "    return tfidf_scores\n",
    "\n",
    "def calculate_vector_length(vector):\n",
    "    if not vector:\n",
    "        return 0  \n",
    "    length = 0\n",
    "    for i in range(len(vector)):\n",
    "        length += vector[i][1] ** 2  \n",
    "    return round(math.sqrt(length), 2)\n",
    "\n",
    "def base_search(query: str, search_titles_only: bool = True, top_n: int = 100):\n",
    "    \"\"\"Rank documents by cosine similarity between query and document\n",
    "    TF-IDF vectors, then keep only hits whose title or description matches\n",
    "    the original query (used as a regex pattern in the final filter).\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    query : str\n",
    "        Raw user query; regex metacharacters are stripped before tokenizing.\n",
    "    search_titles_only : bool\n",
    "        If True use the title index (TF_Title/IDF_Title), else the full one.\n",
    "    top_n : int\n",
    "        Keep at most this many of the highest-weighted query terms.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    list of (doc_id, similarity) pairs sorted by similarity, descending.\n",
    "\n",
    "    Raises\n",
    "    ------\n",
    "    KeyError\n",
    "        When no valid keyword survives tokenization (zero query vector).\n",
    "    \"\"\"\n",
    "    # Strip regex metacharacters before tokenizing, but remember the raw\n",
    "    # query so the final filter can still use it as a pattern.\n",
    "    regex = r'[\\.\\^\\$\\*\\+\\?\\{\\}\\[\\]\\|\\(\\)]'\n",
    "    original_query = query\n",
    "    if re.search(regex, query) is not None:\n",
    "        query = re.sub(regex, '', query)\n",
    "    tokenized_query = sorted(cut_for_search(query))\n",
    "    tokenized_query = [term for term in tokenized_query if term not in [\"\", \" \"]]\n",
    "\n",
    "    # Choose the title-only or the full-text index.\n",
    "    if search_titles_only:\n",
    "        tf_dict, idf_dict, word_list = TF_Title, IDF_Title, word_s_title\n",
    "    else:\n",
    "        tf_dict, idf_dict, word_list = TF, IDF, word_s\n",
    "\n",
    "    # TF-IDF vector of every document.\n",
    "    tfidf_scores_dict = {doc_id: calculate_tfidf(tf_values, idf_dict)\n",
    "                         for doc_id, tf_values in tf_dict.items()}\n",
    "\n",
    "    # TF-IDF vector of the query, truncated to its top_n strongest terms.\n",
    "    query_tf = calculate_tf(word_list, tokenized_query)\n",
    "    query_tfidf = calculate_tfidf(query_tf, idf_dict)\n",
    "    query_vector = sorted(query_tfidf.items(), key=lambda item: item[1], reverse=True)[:top_n]\n",
    "    query_vector_length = calculate_vector_length(query_vector)\n",
    "\n",
    "    if query_vector_length == 0:\n",
    "        raise KeyError(\"No valid keywords found.\")\n",
    "\n",
    "    # Cosine similarity between the query and each document.\n",
    "    search_results = []\n",
    "    for doc_id, doc_vector in tfidf_scores_dict.items():\n",
    "        doc_length = calculate_vector_length(list(doc_vector.items()))\n",
    "        if doc_length == 0:\n",
    "            continue  # all-zero document vector: similarity undefined (was a ZeroDivisionError)\n",
    "        # Direct dict lookup replaces the original scan over every document\n",
    "        # term for every query term (O(q*d) -> O(q)); the dot product is identical.\n",
    "        dot_product = sum(score * doc_vector.get(term, 0)\n",
    "                          for term, score in query_vector if score != 0)\n",
    "        similarity = round(dot_product / (query_vector_length * doc_length), 4)\n",
    "        if similarity > 0:\n",
    "            search_results.append((doc_id, similarity))\n",
    "\n",
    "    search_results.sort(key=lambda item: item[1], reverse=True)\n",
    "\n",
    "    # Final filter: the raw query, used as a regex, must occur in the title\n",
    "    # or the description.  Fall back to a literal match when the raw query\n",
    "    # is not a valid pattern (e.g. an unbalanced bracket), instead of raising.\n",
    "    try:\n",
    "        pattern = re.compile(original_query)\n",
    "    except re.error:\n",
    "        pattern = re.compile(re.escape(original_query))\n",
    "\n",
    "    filtered_results = []\n",
    "    for doc_id, similarity in search_results:\n",
    "        doc_info = information.loc[doc_id]\n",
    "        if pattern.search(str(doc_info.title)) is not None or pattern.search(str(doc_info.description)) is not None:\n",
    "            filtered_results.append((doc_id, similarity))\n",
    "\n",
    "    return filtered_results\n",
    "\n",
    "def test_search_performance(query: str):\n",
    "    \"\"\"Run base_search on ``query``, printing every hit and the elapsed time.\"\"\"\n",
    "    started = time.time()\n",
    "    hits = base_search(query)\n",
    "    elapsed = time.time() - started\n",
    "    print(\"Search results:\")\n",
    "    for hit in hits:\n",
    "        print(hit)\n",
    "    print(f\"Response time: {elapsed:.2f} seconds, returned {len(hits)} results.\")\n",
    "\n",
    "def expand_search_results(search_results: list):\n",
    "    \"\"\"Enrich (doc_id, similarity) hits with title and description, and\n",
    "    blend the cosine score with PageRank (weights 0.7 / 0.3).\n",
    "\n",
    "    Returns a list of (title, doc_id, description, score) tuples sorted\n",
    "    by the blended score, descending.\n",
    "    \"\"\"\n",
    "    expanded = []\n",
    "    for doc_id, similarity in search_results:\n",
    "        record = information.loc[doc_id].fillna('')\n",
    "        # underscores in stored titles are rendered as '/' — presumably\n",
    "        # reversing a filename-safe encoding; confirm against the data source\n",
    "        title = str(record['title']).replace(\"_\", \"/\")\n",
    "        description = str(record['description'])\n",
    "        blended = similarity * 0.7 + 0.3 * page_rank.loc[doc_id]['page_rank']\n",
    "        expanded.append((title, doc_id, description, blended))\n",
    "    return sorted(expanded, key=lambda entry: entry[-1], reverse=True)\n",
    "\n",
    "def test_expand_search_results(query: str):\n",
    "    \"\"\"Title-only search for ``query``, returning the enriched results.\n",
    "\n",
    "    Fix: the original computed ``expanded_results`` and then discarded it;\n",
    "    returning the list makes the helper actually usable while staying\n",
    "    backward-compatible for callers that ignore the return value.\n",
    "    \"\"\"\n",
    "    search_results = base_search(query, True)\n",
    "    return expand_search_results(search_results)\n",
    "\n",
    "\n",
    "# Demo: title-only search for \"运动会\" (sports meet), results blended with PageRank\n",
    "test_expand_search_results(\"运动会\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "ai_learn",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
