{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "26011dd2-535d-4408-b31f-30519ad172b6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reference only: ada-002 tokenizer via transformers (kept for comparison, unused below).\n",
    "# NOTE: the stderr warning previously attached to this cell came from a run\n",
    "# before the code was commented out; outputs cleared to avoid confusion.\n",
    "# from transformers import GPT2TokenizerFast\n",
    "\n",
    "# tokenizer = GPT2TokenizerFast.from_pretrained('Xenova/text-embedding-ada-002')\n",
    "# assert tokenizer.encode('hello world') == [15339, 1917]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "ce48aafc-1ffe-429e-a2a5-73a03f718336",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reference only: tiktoken cl100k_base round-trip (kept for comparison, unused below).\n",
    "# import tiktoken\n",
    "# enc = tiktoken.get_encoding(\"cl100k_base\")\n",
    "# assert enc.decode(enc.encode(\"hello world\")) == \"hello world\"\n",
    "\n",
    "# # To get the tokeniser corresponding to a specific model in the OpenAI API:\n",
    "# enc = tiktoken.encoding_for_model(\"gpt-4\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "40cfde48-c66e-4ce1-963f-3ce5f215d32b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[ 0.14317986 -0.23076142 -0.0139477  ...  0.03993538  0.10085302\n",
      "  -0.19941317]\n",
      " [ 0.03999361 -0.20410594 -0.01313243 ...  0.02634658 -0.20096195\n",
      "  -0.14515458]]\n"
     ]
    }
   ],
   "source": [
    "# Embed two example sentences with a multilingual SBERT model.\n",
    "# encode() returns one fixed-size vector per input sentence\n",
    "# (length confirmed as 768 by the len() checks in the cells below).\n",
    "from sentence_transformers import SentenceTransformer\n",
    "sentences = [\"This is an example sentence\", \"Each sentence is converted\"]\n",
    "\n",
    "model = SentenceTransformer('sentence-transformers/paraphrase-multilingual-mpnet-base-v2')\n",
    "embeddings = model.encode(sentences)\n",
    "print(embeddings)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "ed8aa3b4-bf3a-466c-bac8-c815f6802377",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Encode a Chinese sentence; the multilingual model accepts it directly.\n",
    "vec_zh = model.encode(\"你好世界\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "be7fde29-7cc8-40fa-9041-f8d70350d2f2",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "768"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Dimensionality of the Chinese-sentence embedding.\n",
    "len(vec_zh)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "0f6ff590-2fc2-489d-82fa-dec2ba064882",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Encode an English sentence for comparison.\n",
    "vec_en = model.encode(\"hello world! nice to meet you!\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "8c30b9e3-0b88-451a-9663-f47c0ed2a2aa",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "768"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Same dimensionality (768) regardless of input language or length.\n",
    "len(vec_en)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
