{ "cells": [ { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# 1. Transformer Models" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "import transformers" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Transformers, what can they do?" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "No model was supplied, defaulted to distilbert-base-uncased-finetuned-sst-2-english and revision af0f99b (https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english).\n", "Using a pipeline without specifying a model name and revision in production is not recommended.\n" ] }, { "data": { "text/plain": [ "[{'label': 'POSITIVE', 'score': 0.6012226343154907}]" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from transformers import pipeline\n", "\n", "classifier = pipeline(\"sentiment-analysis\")\n", "classifier(\"OMG this is my first time trying this!\")" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'label': 'POSITIVE', 'score': 0.9998352527618408},\n", " {'label': 'NEGATIVE', 'score': 0.9995977282524109}]" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "classifier(\n", " [\"I really like this a lot!\", \"I hate it like this.\"]\n", ")" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "No model was supplied, defaulted to facebook/bart-large-mnli and revision c626438 (https://huggingface.co/facebook/bart-large-mnli).\n", "Using a pipeline without specifying a model name and revision in production is not recommended.\n" ] }, { "data": { "text/plain": [ "{'sequence': 'How to differentiate sun and cloud?',\n", " 'labels': ['education', 'business', 'politics'],\n", " 'scores': [0.7144545316696167, 0.19746531546115875, 0.08808010816574097]}" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "classifier = pipeline(\"zero-shot-classification\")\n", "classifier(\n", " \"How to differentiate sun and cloud?\",\n", " candidate_labels = [\"education\", \"politics\", \"business\"]\n", ")" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "No model was supplied, defaulted to gpt2 and revision 6c0e608 (https://huggingface.co/gpt2).\n", "Using a pipeline without specifying a model name and revision in production is not recommended.\n", "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n", "/Users/florentiana.yuwono/anaconda3/lib/python3.10/site-packages/transformers/generation/utils.py:1353: UserWarning: Using `max_length`'s default (50) to control the generation length. 
This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we recommend using `max_new_tokens` to control the maximum length of the generation.\n", " warnings.warn(\n" ] }, { "data": { "text/plain": [ "[{'generated_text': 'In this class, I will speak about the key differences between Python and Java, how to do it, how to use it with Python, the importance of data literals to your data structure and more.\\n\\nA lot of the time I am'}]" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "generator = pipeline(\"text-generation\")\n", "generator(\"In this class, I will speak\")" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "data": { "text/plain": [ "[{'generated_text': 'In this class, I will speak about each of the subclasses in question which you have introduced in the introduction.\\nThe above classes are built around'},\n", " {'generated_text': 'In this class, I will speak of the fact that you can\\u2028t read the entire class,\\u202a and read all the classes,�'}]" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "generator = pipeline(\"text-generation\", model=\"distilgpt2\")\n", "generator(\n", " \"In this class, I will speak\",\n", " max_length=30,\n", " num_return_sequences=2\n", ")" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "No model was supplied, defaulted to distilroberta-base and revision ec58a5b (https://huggingface.co/distilroberta-base).\n", "Using a pipeline without specifying a model name and revision in production is not recommended.\n" ] }, { "data": { "text/plain": [ "[{'score': 0.06216174364089966,\n", " 'token': 42,\n", " 'token_str': ' this',\n", " 'sequence': 'The sky is blue and bright, I wonder what this is about.'},\n", " {'score': 0.040428631007671356,\n", " 'token': 24,\n", " 'token_str': ' it',\n", " 'sequence': 'The sky is blue and bright, I wonder what it is about.'},\n", " {'score': 0.023530298843979836,\n", " 'token': 14,\n", " 'token_str': ' that',\n", " 'sequence': 'The sky is blue and bright, I wonder what that is about.'}]" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "unmasker = pipeline(\"fill-mask\")\n", "unmasker(\"The sky is blue and bright, I wonder what <mask> is about.\", top_k=3)" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "No model was supplied, defaulted to dbmdz/bert-large-cased-finetuned-conll03-english and revision f2482bf (https://huggingface.co/dbmdz/bert-large-cased-finetuned-conll03-english).\n", "Using a pipeline without specifying a model name and revision in production is not recommended.\n", "/Users/florentiana.yuwono/anaconda3/lib/python3.10/site-packages/transformers/pipelines/token_classification.py:169: UserWarning: `grouped_entities` is deprecated and will be removed in version v5.0.0, defaulted to `aggregation_strategy=\"simple\"` instead.\n", " warnings.warn(\n" ] }, { "data": { "text/plain": [ "[{'entity_group': 'LOC',\n", " 'score': 0.86960346,\n", " 'word': 'Owl City',\n", " 'start': 56,\n", " 'end': 64}]" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "ner = pipeline(\"ner\", 
grouped_entities=True)\n", "\n", "ner(\"Mine is Hilarious, usually spotted at united nations in Owl City.\")" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "No model was supplied, defaulted to distilbert-base-cased-distilled-squad and revision 626af31 (https://huggingface.co/distilbert-base-cased-distilled-squad).\n", "Using a pipeline without specifying a model name and revision in production is not recommended.\n" ] }, { "data": { "text/plain": [ "{'score': 0.4208132028579712,\n", " 'start': 0,\n", " 'end': 44,\n", " 'answer': 'Mine is Hilarious, usually grab from library'}" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "qa = pipeline(\"question-answering\")\n", "qa(\n", " question=\"Where is it?\",\n", " context=\"Mine is Hilarious, usually grab from library though.\"\n", ")" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "No model was supplied, defaulted to sshleifer/distilbart-cnn-12-6 and revision a4f8f3e (https://huggingface.co/sshleifer/distilbart-cnn-12-6).\n", "Using a pipeline without specifying a model name and revision in production is not recommended.\n" ] }, { "data": { "text/plain": [ "[{'summary_text': ' America has changed dramatically during recent years . The number of engineering graduates in the U.S. has declined in traditional engineering disciplines such as mechanical, civil, electrical, chemical, and aeronautical engineering . Rapidly developing economies such as China and India, as well as other industrial countries in Europe and Asia, continue to encourage and advance engineering .'}]" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "summarizer = pipeline(\"summarization\")\n", "summarizer(\n", " \"\"\"\n", " America has changed dramatically during recent years. Not only has the number of \n", " graduates in traditional engineering disciplines such as mechanical, civil, \n", " electrical, chemical, and aeronautical engineering declined, but in most of \n", " the premier American universities engineering curricula now concentrate on \n", " and encourage largely the study of engineering science. As a result, there \n", " are declining offerings in engineering subjects dealing with infrastructure, \n", " the environment, and related issues, and greater concentration on high \n", " technology subjects, largely supporting increasingly complex scientific \n", " developments. While the latter is important, it should not be at the expense \n", " of more traditional engineering.\n", "\n", " Rapidly developing economies such as China and India, as well as other \n", " industrial countries in Europe and Asia, continue to encourage and advance \n", " the teaching of engineering. Both China and India, respectively, graduate \n", " six and eight times as many traditional engineers as does the United States. 
\n", " Other industrial countries at minimum maintain their output, while America \n", " suffers an increasingly serious decline in the number of engineering graduates \n", " and a lack of well-educated engineers.\n", " \"\"\"\n", ")" ] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'translation_text': 'This course is produced by para.'}]" ] }, "execution_count": 14, "metadata": {}, "output_type": "execute_result" } ], "source": [ "translator = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-fr-en\")\n", "translator(\"Ce cours est produit par.\")" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Bias and limitations" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['cls.seq_relationship.weight', 'cls.seq_relationship.bias']\n", "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "['carpenter', 'lawyer', 'farmer', 'businessman', 'doctor']\n", "['nurse', 'maid', 'teacher', 'waitress', 'prostitute']\n" ] } ], "source": [ "unmasker = pipeline(\"fill-mask\", model=\"bert-base-uncased\")\n", "result = unmasker(\"This man works as a [MASK].\")\n", "print([r[\"token_str\"] for r in result])\n", "\n", "result = unmasker(\"This woman works as a [MASK].\")\n", "print([r[\"token_str\"] for r in result])" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# 2. 
Using Transformers" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [], "source": [ "from transformers import AutoTokenizer" ] }, { "cell_type": "code", "execution_count": 17, "metadata": {}, "outputs": [], "source": [ "checkpoint = \"distilbert-base-uncased-finetuned-sst-2-english\"\n", "tokenizer = AutoTokenizer.from_pretrained(checkpoint)" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "{'input_ids': tensor([[ 101, 1045, 1005, 2310, 2042, 3403, 2005, 17662, 2227, 2026,\n", " 2878, 2166, 1012, 102],\n", " [ 101, 1045, 5223, 2023, 2061, 2172, 999, 999, 102, 0,\n", " 0, 0, 0, 0]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n", " [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]])}\n" ] } ], "source": [ "raw_inputs = [\n", " \"I've been waiting for Hugging Face my whole life.\",\n", " \"I hate this so much!!\"\n", "]\n", "inputs = tokenizer(raw_inputs, padding=True, truncation=True, return_tensors=\"pt\")\n", "print(inputs)" ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Some weights of the model checkpoint at distilbert-base-uncased-finetuned-sst-2-english were not used when initializing DistilBertModel: ['classifier.bias', 'pre_classifier.bias', 'pre_classifier.weight', 'classifier.weight']\n", "- This IS expected if you are initializing DistilBertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", "- This IS NOT expected if you are initializing DistilBertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n" ] } ], "source": [ "from transformers import AutoModel\n", "\n", "checkpoint = \"distilbert-base-uncased-finetuned-sst-2-english\"\n", "model = AutoModel.from_pretrained(checkpoint)" ] }, { "cell_type": "code", "execution_count": 20, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "torch.Size([2, 14, 768])\n" ] } ], "source": [ "outputs = model(**inputs)\n", "print(outputs.last_hidden_state.shape)" ] }, { "cell_type": "code", "execution_count": 24, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "torch.Size([2, 2])\n", "tensor([[-3.0737, 3.1512],\n", " [ 4.0217, -3.2439]], grad_fn=)\n", "SequenceClassifierOutput(loss=None, logits=tensor([[-3.0737, 3.1512],\n", " [ 4.0217, -3.2439]], grad_fn=), hidden_states=None, attentions=None)\n" ] } ], "source": [ "from transformers import AutoModelForSequenceClassification\n", "\n", "checkpoint = \"distilbert-base-uncased-finetuned-sst-2-english\"\n", "model = AutoModelForSequenceClassification.from_pretrained(checkpoint)\n", "outputs = model(**inputs)\n", "print(outputs.logits.shape)\n", "print(outputs.logits)\n", "print(outputs)" ] }, { "cell_type": "code", "execution_count": 25, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensor([[1.9756e-03, 9.9802e-01],\n", " [9.9930e-01, 6.9871e-04]], grad_fn=)\n" ] } ], "source": [ "import torch\n", "predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)\n", "print(predictions)" ] }, { "cell_type": "code", "execution_count": 26, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": 
[ "BertConfig {\n", " \"attention_probs_dropout_prob\": 0.1,\n", " \"classifier_dropout\": null,\n", " \"hidden_act\": \"gelu\",\n", " \"hidden_dropout_prob\": 0.1,\n", " \"hidden_size\": 768,\n", " \"initializer_range\": 0.02,\n", " \"intermediate_size\": 3072,\n", " \"layer_norm_eps\": 1e-12,\n", " \"max_position_embeddings\": 512,\n", " \"model_type\": \"bert\",\n", " \"num_attention_heads\": 12,\n", " \"num_hidden_layers\": 12,\n", " \"pad_token_id\": 0,\n", " \"position_embedding_type\": \"absolute\",\n", " \"transformers_version\": \"4.30.2\",\n", " \"type_vocab_size\": 2,\n", " \"use_cache\": true,\n", " \"vocab_size\": 30522\n", "}\n", "\n" ] } ], "source": [ "from transformers import BertConfig, BertModel\n", "\n", "config = BertConfig()\n", "\n", "model = BertModel(config)\n", "\n", "print(config)" ] }, { "cell_type": "code", "execution_count": 27, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "58b47a5171714102b6499f7e76b36326", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Downloading: 0%| | 0.00/570 [00:00)\n" ] } ], "source": [ "import torch\n", "from transformers import AutoTokenizer, AutoModelForSequenceClassification\n", "\n", "checkpoint = \"distilbert-base-uncased-finetuned-sst-2-english\"\n", "tokenizer = AutoTokenizer.from_pretrained(checkpoint)\n", "model = AutoModelForSequenceClassification.from_pretrained(checkpoint)\n", "\n", "sequence = \"I've been waiting for this course my whole life.\"\n", "\n", "tokens = tokenizer.tokenize(sequence)\n", "ids = tokenizer.convert_tokens_to_ids(tokens)\n", "\n", "input_ids = torch.tensor([ids])\n", "print(input_ids)\n", "\n", "output = model(input_ids)\n", "print(output.logits)" ] }, { "cell_type": "code", "execution_count": 34, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensor([[ 1.5694, -1.3895]], grad_fn=)\n", "tensor([[ 0.5803, -0.4125]], grad_fn=)\n", "tensor([[ 1.5694, -1.3895],\n", " [ 1.3373, -1.2163]], grad_fn=)\n" ] } ], "source": [ "model = AutoModelForSequenceClassification.from_pretrained(checkpoint)\n", "\n", "sequence1_ids = [[200, 200, 200]]\n", "sequence2_ids = [[200, 200]]\n", "batched_ids = [\n", " [200, 200, 200],\n", " [200, 200, tokenizer.pad_token_id]\n", "]\n", "\n", "print(model(torch.tensor(sequence1_ids)).logits)\n", "print(model(torch.tensor(sequence2_ids)).logits)\n", "print(model(torch.tensor(batched_ids)).logits)" ] }, { "cell_type": "code", "execution_count": 35, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensor([[ 1.5694, -1.3895],\n", " [ 0.5803, -0.4125]], grad_fn=)\n" ] } ], "source": [ "attention_mask = [\n", " [1, 1, 1], [1, 1, 0]\n", "]\n", "\n", "outputs = model(torch.tensor(batched_ids), attention_mask=torch.tensor(attention_mask))\n", "print(outputs.logits)" ] }, { "cell_type": "code", "execution_count": 37, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "SequenceClassifierOutput(loss=None, logits=tensor([[-2.7211, 2.7688],\n", " [-3.0041, 3.2001]], grad_fn=), hidden_states=None, attentions=None)\n" ] } ], "source": [ "import torch\n", "from transformers import AutoTokenizer, AutoModelForSequenceClassification\n", "\n", "checkpoint = \"distilbert-base-uncased-finetuned-sst-2-english\"\n", "tokenizer = AutoTokenizer.from_pretrained(checkpoint)\n", "model = AutoModelForSequenceClassification.from_pretrained(checkpoint)\n", "sequences = [\"I've been waiting for this!\", \"So have I.\"]\n", "\n", "tokens = 
tokenizer(sequences, padding=True, truncation=True, return_tensors=\"pt\")\n", "output = model(**tokens)\n", "print(output)" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# 3. Fine-tuning a pretrained model" ] }, { "cell_type": "code", "execution_count": 38, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForSequenceClassification: ['cls.seq_relationship.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.dense.weight', 'cls.seq_relationship.bias', 'cls.predictions.bias']\n", "- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", "- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at bert-base-uncased and are newly initialized: ['classifier.bias', 'classifier.weight']\n", "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n", "/Users/florentiana.yuwono/anaconda3/lib/python3.10/site-packages/transformers/optimization.py:411: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n", " warnings.warn(\n" ] } ], "source": [ "import torch\n", "from transformers import AdamW, AutoTokenizer, AutoModelForSequenceClassification\n", "\n", "checkpoint = \"bert-base-uncased\"\n", "tokenizer = AutoTokenizer.from_pretrained(checkpoint)\n", "model = AutoModelForSequenceClassification.from_pretrained(checkpoint)\n", "sequences = [\n", " \"I've been waiting for a course for my whole life.\",\n", " \"This course is amazing!\"\n", "]\n", "batch = tokenizer(sequences, padding=True, truncation=True, return_tensors=\"pt\")\n", "\n", "batch[\"labels\"] = torch.tensor([1, 1])\n", "optimizer = AdamW(model.parameters())\n", "loss = model(**batch).loss\n", "loss.backward()\n", "optimizer.step()" ] }, { "cell_type": "code", "execution_count": 39, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "c7129bfda8404178bba7d5c23ac3c67b", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Downloading builder script: 0%| | 0.00/28.8k [00:00