{
 "metadata": {
  "kernelspec": {
   "language": "python",
   "display_name": "Python 3",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.7.12",
   "mimetype": "text/x-python",
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "pygments_lexer": "ipython3",
   "nbconvert_exporter": "python",
   "file_extension": ".py"
  }
 },
 "nbformat_minor": 4,
 "nbformat": 4,
 "cells": [
  {
   "cell_type": "markdown",
   "source": [
    "```\n",
    "知识图谱: 实体,属性,关系\n",
    "用途: 路径发现, 关联分析\n",
    "区别于关系型数据库(表:实体+属性, 表外键关联:关系), 直接用图结构表示实体+属性+关系, 方便多度关系查询, 提供了原先机器学习难以获取的维度\n",
    "\n",
    "建立图谱, 建立schema(类似关系型数据库需要建立er关系图)\n",
    "\n",
    "同义词: 处理意思相同的数据(人行 -> 中国人民银行)\n",
    "\n",
    "风控常用 XGBoost\n",
    "```"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# Tested on the https://www.kaggle.com/code environment (ML competition/learning/testing site)\n",
    "# This Python 3 environment comes with many helpful analytics libraries installed\n",
    "# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n",
    "# For example, here's several helpful packages to load\n",
    "# %pip (rather than !pip) installs into the running kernel's own environment\n",
    "%pip install transformers"
   ],
   "metadata": {
    "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5",
    "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19",
    "execution": {
     "iopub.status.busy": "2022-05-25T14:27:31.015091Z",
     "iopub.execute_input": "2022-05-25T14:27:31.015414Z",
     "iopub.status.idle": "2022-05-25T14:27:43.990024Z",
     "shell.execute_reply.started": "2022-05-25T14:27:31.015383Z",
     "shell.execute_reply": "2022-05-25T14:27:43.989078Z"
    },
    "trusted": true
   },
   "execution_count": 4,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": "# A natural-language query plus two knowledge-triple strings (entity-attribute-value)\nquery = '姚明的身高是多少'\ns1 = '姚明-身高-2米'\ns2 = '易建联-身高-2米1'",
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-05-25T14:40:47.366999Z",
     "iopub.execute_input": "2022-05-25T14:40:47.367725Z",
     "iopub.status.idle": "2022-05-25T14:40:47.374649Z",
     "shell.execute_reply.started": "2022-05-25T14:40:47.367670Z",
     "shell.execute_reply": "2022-05-25T14:40:47.373634Z"
    },
    "trusted": true
   },
   "execution_count": 1,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": "# Load the SimBERT Chinese sentence encoder and its tokenizer\nfrom transformers import AutoModel, AutoTokenizer\n\nMODEL_NAME = \"peterchou/simbert-chinese-base\"\ntokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)\nmodel = AutoModel.from_pretrained(MODEL_NAME)",
   "metadata": {
    "_kg_hide-input": true,
    "_kg_hide-output": true,
    "execution": {
     "iopub.status.busy": "2022-05-25T14:27:52.397488Z",
     "iopub.execute_input": "2022-05-25T14:27:52.397889Z",
     "iopub.status.idle": "2022-05-25T14:28:53.308218Z",
     "shell.execute_reply.started": "2022-05-25T14:27:52.397844Z",
     "shell.execute_reply": "2022-05-25T14:28:53.306737Z"
    },
    "trusted": true
   },
   "execution_count": 5,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": "# Embed s1 and s2; keep each sentence's first ([CLS]) and last token vectors\ninputs = tokenizer([s1], return_tensors=\"pt\", padding=True)\nprint(inputs)\nhidden1 = model(**inputs).last_hidden_state  # [batch_size, seq_len, hidden_dim]\nstart1, end1 = hidden1[0, 0, :], hidden1[0, -1, :]\ninputs = tokenizer([s2], return_tensors=\"pt\", padding=True)\nhidden2 = model(**inputs).last_hidden_state\nprint(hidden2.shape)  # [batch_size, seq_len, hidden_dim]\nstart2, end2 = hidden2[0, 0, :], hidden2[0, -1, :]",
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-05-25T14:40:51.385284Z",
     "iopub.execute_input": "2022-05-25T14:40:51.386297Z",
     "iopub.status.idle": "2022-05-25T14:40:51.561828Z",
     "shell.execute_reply.started": "2022-05-25T14:40:51.386244Z",
     "shell.execute_reply": "2022-05-25T14:40:51.560873Z"
    },
    "trusted": true
   },
   "execution_count": 8,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": "# Cosine similarity between the two sentences' [CLS] and last-token vectors\nimport torch\n\nfor label, a, b in (('start score:', start1, start2), ('end score', end1, end2)):\n    print(label, torch.cosine_similarity(a, b, dim=0))",
   "metadata": {
    "trusted": true
   },
   "execution_count": 9,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": "# %pip (rather than !pip) installs into the running kernel's own environment\n%pip install sentence_transformers",
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-05-25T14:49:53.916091Z",
     "iopub.execute_input": "2022-05-25T14:49:53.916456Z",
     "iopub.status.idle": "2022-05-25T14:50:06.510511Z",
     "shell.execute_reply.started": "2022-05-25T14:49:53.916421Z",
     "shell.execute_reply": "2022-05-25T14:50:06.509652Z"
    },
    "trusted": true
   },
   "execution_count": 10,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": "# sentence_transformers wraps the encoder and handles pooling into one vector per sentence\nfrom sentence_transformers import SentenceTransformer, util\n\n# Pass cache_folder=... to SentenceTransformer to control where the weights are saved\nmodel1 = SentenceTransformer('peterchou/simbert-chinese-base')",
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-05-25T14:50:17.650047Z",
     "iopub.execute_input": "2022-05-25T14:50:17.650394Z",
     "iopub.status.idle": "2022-05-25T14:50:41.371142Z",
     "shell.execute_reply.started": "2022-05-25T14:50:17.650359Z",
     "shell.execute_reply": "2022-05-25T14:50:41.370236Z"
    },
    "trusted": true
   },
   "execution_count": 11,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": "# Encode both triples and compare them with the library's cosine helper\nembeddings1, embeddings2 = (model1.encode(s) for s in (s1, s2))\nprint(type(embeddings1))\n\ncos_sim = util.cos_sim(embeddings1, embeddings2)\nprint(\"Cosine-Similarity:\", cos_sim)",
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-05-25T14:52:06.059076Z",
     "iopub.execute_input": "2022-05-25T14:52:06.059860Z",
     "iopub.status.idle": "2022-05-25T14:52:06.276768Z",
     "shell.execute_reply.started": "2022-05-25T14:52:06.059819Z",
     "shell.execute_reply": "2022-05-25T14:52:06.275647Z"
    },
    "trusted": true
   },
   "execution_count": 13,
   "outputs": []
  }
 ]
}