{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['I love NLP nlp', 'NLP is awesome', 'I hate spam']\n"
     ]
    }
   ],
   "source": [
    "# Tokenization. The original jieba segmentation of a Chinese sentence is\n",
    "# stubbed out; a fixed English corpus is used instead so the vectorizer\n",
    "# cells below are reproducible without jieba installed.\n",
    "import numpy as np\n",
    "\n",
    "# seg_list = jieba.lcut(\"我来到北京清华大学\")  # original Chinese example\n",
    "seg_list = [\"I love NLP nlp\", \"NLP is awesome\", \"I hate spam\"]\n",
    "\n",
    "print(seg_list)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[0 0 0 1 2 0]\n",
      " [1 0 1 0 1 0]\n",
      " [0 1 0 0 0 1]]\n",
      "['awesome' 'hate' 'is' 'love' 'nlp' 'spam']\n"
     ]
    }
   ],
   "source": [
    "# Bag-of-Words: count how often each vocabulary term occurs per document.\n",
    "from sklearn.feature_extraction.text import CountVectorizer\n",
    "\n",
    "vectorizer = CountVectorizer()\n",
    "bow_matrix = vectorizer.fit_transform(seg_list)\n",
    "\n",
    "print(bow_matrix.toarray())\n",
    "print(vectorizer.get_feature_names_out())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[0.         0.         0.         0.54935123 0.83559154 0.        ]\n",
      " [0.62276601 0.         0.62276601 0.         0.4736296  0.        ]\n",
      " [0.         0.70710678 0.         0.         0.         0.70710678]]\n",
      "['awesome' 'hate' 'is' 'love' 'nlp' 'spam']\n"
     ]
    }
   ],
   "source": [
    "# TF-IDF: reweights Bag-of-Words counts (TF) by inverse document\n",
    "# frequency (IDF), lowering the weight of terms common to every document.\n",
    "# TF  = (term count in document) / (total terms in document)\n",
    "# IDF = log(number of documents / (documents containing the term + 1))\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "\n",
    "vectorizer = TfidfVectorizer()\n",
    "x = vectorizer.fit_transform(seg_list)\n",
    "# toarray() instead of todense(): todense() returns np.matrix, which\n",
    "# NumPy discourages in favor of plain ndarrays.\n",
    "print(x.toarray())\n",
    "print(vectorizer.get_feature_names_out())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Word2Vec: learn dense word vectors from a toy corpus.\n",
    "# (Removed unused `from numpy import triu` — nothing here references it,\n",
    "# and importing it does not patch any library.)\n",
    "from gensim.models import Word2Vec\n",
    "\n",
    "sentences = [['I', 'love', 'nlp', 'nlp'], ['NLP', 'is', 'awesome'], ['I', 'hate', 'spam']]\n",
    "\n",
    "# min_count=1 keeps every token despite the tiny corpus.\n",
    "# NOTE(review): workers=4 makes training non-deterministic run to run.\n",
    "model = Word2Vec(sentences, vector_size=100, min_count=1, workers=4)\n",
    "\n",
    "print(model.wv['nlp'])\n",
    "print(model.wv.most_similar('nlp'))\n",
    "\n",
    "def text_to_vector(text):\n",
    "    \"\"\"Average the word vectors of the whitespace-split tokens of `text`.\n",
    "\n",
    "    Tokens absent from the model vocabulary are skipped; if no token is\n",
    "    known, a zero vector of the model's dimensionality is returned.\n",
    "    \"\"\"\n",
    "    words = text.split()\n",
    "    vectors = [model.wv[word] for word in words if word in model.wv]\n",
    "    # model.vector_size instead of a hard-coded 100, so the helper stays\n",
    "    # correct if vector_size is changed above.\n",
    "    return np.mean(vectors, axis=0) if vectors else np.zeros(model.vector_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(384,)\n"
     ]
    }
   ],
   "source": [
    "# Sentence-level embedding with a pretrained MiniLM model (384-dim output).\n",
    "# NOTE(review): this rebinds `model` (previously the Word2Vec model) to a\n",
    "# SentenceTransformer; later cells depend on this rebinding.\n",
    "from sentence_transformers import SentenceTransformer\n",
    "\n",
    "model = SentenceTransformer('all-MiniLM-L6-v2')\n",
    "sentence_vector = model.encode(\" I love nlp\")\n",
    "print(sentence_vector.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.2605]])"
      ]
     },
     "execution_count": 44,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Embedding similarity between two words; unrelated words\n",
    "# ('teacher' vs 'apple') are expected to score low.\n",
    "emb_teacher = model.encode('teacher')\n",
    "emb_apple = model.encode('apple')\n",
    "\n",
    "similar = model.similarity(emb_teacher, emb_apple)\n",
    "similar"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[{'corpus_id': 1, 'score': 0.8368561267852783}, {'corpus_id': 0, 'score': 0.8264977931976318}]]\n"
     ]
    }
   ],
   "source": [
    "# Semantic search: rank corpus sentences by embedding similarity to the query.\n",
    "from sentence_transformers import util\n",
    "\n",
    "query = '如何进行文本向量化'\n",
    "corpus = ['文本向量化是自然语言处理中的一个重要步骤',\n",
    "          '文本向量化是将文本转换为数值向量的过程',\n",
    "          'NLP入门',\n",
    "          '深度学习基础']\n",
    "\n",
    "query_embedding = model.encode(query)\n",
    "corpus_embeddings = model.encode(corpus)\n",
    "\n",
    "# top_k=2 returns the two best matches as (corpus_id, score) dicts.\n",
    "hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=2)\n",
    "print(hits)"
   ]
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py",
   "language": "python",
   "name": "py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.23"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
