{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "name": "L4 Project.ipynb", "provenance": [], "collapsed_sections": [], "include_colab_link": true }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" } }, "cells": [ { "cell_type": "markdown", "metadata": { "id": "view-in-github", "colab_type": "text" }, "source": [ "\"Open" ] }, { "cell_type": "code", "metadata": { "id": "tGd7LV0Qtxwz" }, "source": [ "# Code from https://ramsrigoutham.medium.com/sized-fill-in-the-blank-or-multi-mask-filling-with-roberta-and-huggingface-transformers-58eb9e7fb0c" ], "execution_count": 47, "outputs": [] }, { "cell_type": "code", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "KuvW3fmor6Tu", "outputId": "0f032f97-e518-432d-cf2b-b52bc0e6b80d" }, "source": [ "!pip install transformers\n", "import torch" ], "execution_count": 48, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Requirement already satisfied: transformers in /usr/local/lib/python3.7/dist-packages (4.11.3)\n", "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.7/dist-packages (from transformers) (21.0)\n", "Requirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from transformers) (2.23.0)\n", "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.7/dist-packages (from transformers) (1.19.5)\n", "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.7/dist-packages (from transformers) (6.0)\n", "Requirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from transformers) (4.8.1)\n", "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.7/dist-packages (from transformers) (2019.12.20)\n", "Requirement already satisfied: huggingface-hub>=0.0.17 in /usr/local/lib/python3.7/dist-packages (from transformers) (0.0.19)\n", "Requirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from transformers) (3.3.0)\n", "Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.7/dist-packages (from transformers) (4.62.3)\n", "Requirement already satisfied: tokenizers<0.11,>=0.10.1 in /usr/local/lib/python3.7/dist-packages (from transformers) (0.10.3)\n", "Requirement already satisfied: sacremoses in /usr/local/lib/python3.7/dist-packages (from transformers) (0.0.46)\n", "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from huggingface-hub>=0.0.17->transformers) (3.7.4.3)\n", "Requirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=20.0->transformers) (2.4.7)\n", "Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->transformers) (3.6.0)\n", "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (1.24.3)\n", "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (2021.5.30)\n", "Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (3.0.4)\n", "Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (2.10)\n", "Requirement already satisfied: joblib in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (1.0.1)\n", "Requirement already 
satisfied: six in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (1.15.0)\n", "Requirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (7.1.2)\n" ] } ] },
{ "cell_type": "code", "metadata": { "id": "AIRsE899r-53", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "f1181c17-41fd-4d23-befc-484bae0a5481" }, "source": [ "from transformers import RobertaTokenizer, RobertaForMaskedLM\n", "tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n", "model = RobertaForMaskedLM.from_pretrained('roberta-base')\n", "\n", "def set_seed(seed: int):\n", "    \"\"\"\n", "    Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf`` (if\n", "    installed).\n", "\n", "    Args:\n", "        seed (:obj:`int`): The seed to set.\n", "    \"\"\"\n", "    #random.seed(seed)\n", "    #np.random.seed(seed)\n", "    #if is_torch_available():\n", "    torch.manual_seed(seed)\n", "    torch.cuda.manual_seed_all(seed)\n", "    # ^^ safe to call this function even if cuda is not available\n", "    #if is_tf_available():\n", "    #tf.random.set_seed(seed)\n", "\n", "def get_prediction(sent):\n", "    # Encode the sentence and locate the position of every <mask> token\n", "    token_ids = tokenizer.encode(sent, return_tensors='pt')\n", "    masked_position = (token_ids.squeeze() == tokenizer.mask_token_id).nonzero()\n", "    masked_pos = [mask.item() for mask in masked_position]\n", "\n", "    with torch.no_grad():\n", "        output = model(token_ids)\n", "\n", "    last_hidden_state = output[0].squeeze()\n", "\n", "    list_of_list = []\n", "    for index, mask_index in enumerate(masked_pos):\n", "        # Keep the five highest-scoring vocabulary tokens for this mask position\n", "        mask_hidden_state = last_hidden_state[mask_index]\n", "        idx = torch.topk(mask_hidden_state, k=5, dim=0)[1]\n", "        words = [tokenizer.decode(i.item()).strip() for i in idx]\n", "        list_of_list.append(words)\n", "        print(\"Mask \",index+1,\"Guesses : \",words)\n", "\n", "    best_guess = \"\"\n", "    for j in list_of_list:\n", "        best_guess = best_guess+\" \"+j[0]\n", "\n", "    return best_guess\n", "\n", "sentence = \"Manchester United are ___ ___ ___ champions.\"\n", "print (\"Original Sentence: \",sentence)\n", "sentence = sentence.replace(\"___\",\"<mask>\")\n", "print (\"Original Sentence replaced with mask: \",sentence)\n", "print (\"\\n\")\n", "\n", "predicted_blanks = get_prediction(sentence)\n", "print (\"\\nBest guess for fill in the blank :::\",predicted_blanks)" ], "execution_count": 49, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: Manchester United are ___ ___ ___ champions.\n", "Original Sentence replaced with mask: Manchester United are <mask> <mask> <mask> champions.\n", "\n", "\n", "Mask 1 Guesses : ['the', 'defending', 'currently', 'reigning', 'crowned']\n", "Mask 2 Guesses : ['reigning', 'defending', 'the', 'crowned', 'Premier']\n", "Mask 3 Guesses : ['League', 'league', 'defending', 'world', 'four']\n", "\n", "Best guess for fill in the blank ::: the reigning League\n" ] } ] },
{ "cell_type": "code", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "dDeTSyiisEJJ", "outputId": "ce4cfa3c-875f-4225-ab2e-dd7b4e2a012a" }, "source": [ "from transformers import pipeline\n", "text_generation = pipeline(\"text-generation\")\n", "\n", "limericks = []\n", "set_seed(31)\n", "\n", "starting_words = [[\"That\", \"Had\", \"Not\", \"But\", \"That\"], \n", " [\"There\", \"Who\", \"She\", \"Tormenting\", \"Til\"],\n", " [\"Relentless\", \"This\", \"First\", \"and\", \"then\"],\n", " [\"There\", \"Who\", \"That\", \"To\", \"She\"],\n", " [\"There\", \"Who\", \"Two\", \"Four\", \"Have\"]]\n", "\n", "rhyming_words = [[\"told\", \"bold\", \"woodchuck\", \"truck\", \"road\"], \n", " [\"Nice\", \"grease\", \"house\", \"spouse\", \"peace\"],\n", " [\"deadlines\", \"lines\", \"edits\", \"credits\", \"wine\"],\n", " [\"Lynn\", \"thin\", \"essayed\", \"lemonade\", \"in\"],\n", " [\"beard\", \"feared\", \"hen\", \"wren\", \"beard\"]]\n", "\n", "for i in range(len(starting_words)):\n", "    limerick = \"\"\n", "\n", "    for j in range(5):\n", "        # GPT-2 supplies the opening words; RoBERTa then fills the three blanks before the rhyming word\n", "        gpt2_sentence = text_generation(starting_words[i][j], max_length=3, do_sample=False)[0]\n", "        sentence = gpt2_sentence['generated_text'] + \" ___ ___ ___ \" + rhyming_words[i][j]\n", "        print(\"Original Sentence: \",sentence)\n", "        sentence = sentence.replace(\"___\",\"<mask>\")\n", "        print(\"Original Sentence replaced with mask: \",sentence)\n", "        print(\"\\n\")\n", "\n", "        predicted_blanks = get_prediction(sentence)\n", "        print(\"\\nBest guess for fill in the blank: \", predicted_blanks)\n", "        limerick = limerick + gpt2_sentence['generated_text'] + predicted_blanks + \" \" + rhyming_words[i][j] + \"\\n\"\n", "\n", "    limericks.append(limerick)\n", "\n", "print(\"\\n\")\n", "print(f\"Generated {len(limericks)} limericks: \\n\")\n", "for limerick in limericks:\n", "    print(limerick)" ], "execution_count": 50, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ "No model was supplied, defaulted to gpt2 (https://huggingface.co/gpt2)\n", "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n", "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: That is why ___ ___ ___ told\n", "Original Sentence replaced with mask: That is why <mask> <mask> <mask> told\n", "\n", "\n", "Mask 1 Guesses : ['you', 'the', 'we', 'I', 'they']\n", "Mask 2 Guesses : ['will', 'should', 'must', 'is', 'are']\n", "Mask 3 Guesses : ['be', 'been', 'not', 'never', 'being']\n", "\n", "Best guess for fill in the blank: you will be\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: Had the same ___ ___ ___ bold\n", "Original Sentence replaced with mask: Had the same <mask> <mask> <mask> bold\n", "\n", "\n", "Mask 1 Guesses : ['font', 'color', 'name', 'text', 'letters']\n", "Mask 2 Guesses : [',', 'but', '.', 'color', 'and']\n", "Mask 3 Guesses : ['in', ':', 'but', 'be', ',']\n", "\n", "Best guess for fill in the blank: font , in\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: Not the only ___ ___ ___ woodchuck\n", "Original Sentence replaced with mask: Not the only <mask> <mask> <mask> woodchuck\n", "\n", "\n", "Mask 1 Guesses : ['one', 'non', 'species', 'kind', 'American']\n", "Mask 2 Guesses : ['-', 'of', 'for', 'in', 'with']\n", "Mask 3 Guesses : ['the', 'a', 'eating', 'y', 'American']\n", "\n", "Best guess for fill in the blank: one - the\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: But the most ___ ___ ___ truck\n", "Original Sentence replaced with mask: But the most <mask> <mask> <mask> truck\n", "\n", "\n", "Mask 1 Guesses : ['important', 'dangerous', 'valuable', 'interesting', 'expensive']\n", "Mask 2 Guesses : ['is',
':', '-', '?', 'kind']\n", "Mask 3 Guesses : ['the', 'a', 'pickup', 'delivery', 'is']\n", "\n", "Best guess for fill in the blank: important is the\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: That is why ___ ___ ___ road\n", "Original Sentence replaced with mask: That is why road\n", "\n", "\n", "Mask 1 Guesses : ['I', 'we', 'they', 'you', 'he']\n", "Mask 2 Guesses : ['built', 'closed', 'build', 'cross', 'need']\n", "Mask 3 Guesses : ['the', 'this', 'a', 'that', 'my']\n", "\n", "Best guess for fill in the blank: I built the\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: There is no ___ ___ ___ Nice\n", "Original Sentence replaced with mask: There is no Nice\n", "\n", "\n", "Mask 1 Guesses : ['way', 'one', 'other', 'good', 'more']\n", "Mask 2 Guesses : ['to', 'city', 'way', 'state', 'of']\n", "Mask 3 Guesses : ['in', 'for', 'of', 'to', 'than']\n", "\n", "Best guess for fill in the blank: way to in\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: Who is the ___ ___ ___ grease\n", "Original Sentence replaced with mask: Who is the grease\n", "\n", "\n", "Mask 1 Guesses : ['master', 'king', 'man', 'god', 'boss']\n", "Mask 2 Guesses : ['of', 'in', '?', 'with', 'behind']\n", "Mask 3 Guesses : ['elbow', 'the', 'of', 'bacon', 'in']\n", "\n", "Best guess for fill in the blank: master of elbow\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n", "Input length of input_ids is 3, but ``max_length`` is set to 3.This can lead to unexpected behavior. You should consider increasing ``config.max_length`` or ``max_length``.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: She, who ___ ___ ___ house\n", "Original Sentence replaced with mask: She, who house\n", "\n", "\n", "Mask 1 Guesses : ['lived', ',', 'lives', 'never', 'owns']\n", "Mask 2 Guesses : ['in', ',', 'a', 'the', 'her']\n", "Mask 3 Guesses : ['the', 'her', 'own', 'a', ',']\n", "\n", "Best guess for fill in the blank: lived in the\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: Tormenting the ___ ___ ___ spouse\n", "Original Sentence replaced with mask: Tormenting the spouse\n", "\n", "\n", "Mask 1 Guesses : ['life', 'relationship', 'soul', 'marriage', 'love']\n", "Mask 2 Guesses : ['of', 'with', 'and', 'in', 'for']\n", "Mask 3 Guesses : ['the', 'your', 'a', 'its', 'their']\n", "\n", "Best guess for fill in the blank: life of the\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n", "Input length of input_ids is 3, but ``max_length`` is set to 3.This can lead to unexpected behavior. 
You should consider increasing ``config.max_length`` or ``max_length``.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: Tilted ___ ___ ___ peace\n", "Original Sentence replaced with mask: Tilted peace\n", "\n", "\n", "Mask 1 Guesses : ['by', ',', 'war', 'over', 'water']\n", "Mask 2 Guesses : [',', 'of', ':', 'to', 'for']\n", "Mask 3 Guesses : ['of', 'for', 'world', 'not', 'fragile']\n", "\n", "Best guess for fill in the blank: by , of\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: Relentless, ___ ___ ___ deadlines\n", "Original Sentence replaced with mask: Relentless, deadlines\n", "\n", "\n", "Mask 1 Guesses : ['focused', 'un', 'self', 'never', 'determined']\n", "Mask 2 Guesses : ['-', 'to', ',', 'and', 'of']\n", "Mask 3 Guesses : ['meet', 'of', 'to', ',', 'and']\n", "\n", "Best guess for fill in the blank: focused - meet\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: This is a ___ ___ ___ lines\n", "Original Sentence replaced with mask: This is a lines\n", "\n", "\n", "Mask 1 Guesses : ['list', 'sample', 'function', 'sequence', 'summary']\n", "Mask 2 Guesses : ['of', 'with', 'for', 'between', 'from']\n", "Mask 3 Guesses : ['the', 'these', 'two', 'some', 'those']\n", "\n", "Best guess for fill in the blank: list of the\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: First, the ___ ___ ___ edits\n", "Original Sentence replaced with mask: First, the edits\n", "\n", "\n", "Mask 1 Guesses : ['most', 'first', 'big', 'more', 'final']\n", "Mask 2 Guesses : ['-', 'and', 'of', ',', 'for']\n", "Mask 3 Guesses : ['the', 'and', 'of', 'final', 'my']\n", "\n", "Best guess for fill in the blank: most - the\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: and, in ___ ___ ___ credits\n", "Original Sentence replaced with mask: and, in credits\n", "\n", "\n", "Mask 1 Guesses : ['addition', 'the', 'particular', 'part', 'conjunction']\n", "Mask 2 Guesses : [',', 'to', 'of', 'the', 'with']\n", "Mask 3 Guesses : ['the', 'closing', 'film', ',', 'movie']\n", "\n", "Best guess for fill in the blank: addition , the\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: then, the ___ ___ ___ wine\n", "Original Sentence replaced with mask: then, the wine\n", "\n", "\n", "Mask 1 Guesses : ['next', 'wine', 'whole', 'first', 'final']\n", "Mask 2 Guesses : ['of', 'is', ':', ',', 'question']\n", "Mask 3 Guesses : ['red', 'white', 'the', 'of', 'fine']\n", "\n", "Best guess for fill in the blank: next of red\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: There is no ___ ___ ___ Lynn\n", 
"Original Sentence replaced with mask: There is no Lynn\n", "\n", "\n", "Mask 1 Guesses : ['cure', 'way', 'end', 'right', 'justice']\n", "Mask 2 Guesses : ['for', 'to', 'of', 'in', 'like']\n", "Mask 3 Guesses : ['Christopher', 'Lord', 'Jeremy', 'Chris', 'Richard']\n", "\n", "Best guess for fill in the blank: cure for Christopher\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: Who is the ___ ___ ___ thin\n", "Original Sentence replaced with mask: Who is the thin\n", "\n", "\n", "Mask 1 Guesses : ['person', 'guy', 'one', 'woman', 'man']\n", "Mask 2 Guesses : ['who', 'with', 'that', '?', 'not']\n", "Mask 3 Guesses : ['is', 'looks', 'and', 'too', \"'s\"]\n", "\n", "Best guess for fill in the blank: person who is\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: That is why ___ ___ ___ essayed\n", "Original Sentence replaced with mask: That is why essayed\n", "\n", "\n", "Mask 1 Guesses : ['I', 'the', 'he', 'this', 'you']\n", "Mask 2 Guesses : ['was', 'is', 'have', 'has', 'will']\n", "Mask 3 Guesses : ['was', 'is', 'be', 'being', 'been']\n", "\n", "Best guess for fill in the blank: I was was\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: To the next ___ ___ ___ lemonade\n", "Original Sentence replaced with mask: To the next lemonade\n", "\n", "\n", "Mask 1 Guesses : ['level', 'step', 'day', 'generation', 'round']\n", "Mask 2 Guesses : [',', ':', '', 'of', '.']\n", "Mask 3 Guesses : ['more', 'the', 'of', 'make', 'More']\n", "\n", "Best guess for fill in the blank: level , more\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: She, who ___ ___ ___ in\n", "Original Sentence replaced with mask: She, who in\n", "\n", "\n", "Mask 1 Guesses : ['is', 'was', 'has', 'I', 'had']\n", "Mask 2 Guesses : ['a', ',', 'an', 'also', 'been']\n", "Mask 3 Guesses : ['interested', 'live', 'involved', 'born', 'lives']\n", "\n", "Best guess for fill in the blank: is a interested\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: There is no ___ ___ ___ beard\n", "Original Sentence replaced with mask: There is no beard\n", "\n", "\n", "Mask 1 Guesses : ['reason', 'need', 'point', 'place', 'room']\n", "Mask 2 Guesses : ['for', 'to', 'in', 'with', 'of']\n", "Mask 3 Guesses : ['a', 'the', 'your', 'no', 'his']\n", "\n", "Best guess for fill in the blank: reason for a\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: Who is the ___ ___ ___ feared\n", "Original Sentence replaced with mask: Who is the feared\n", "\n", "\n", "Mask 1 Guesses : ['person', 'man', 'terrorist', 'leader', 'one']\n", "Mask 2 Guesses : ['who', 'leader', 
'and', ',', 'that']\n", "Mask 3 Guesses : ['is', 'most', 'and', 'be', 'are']\n", "\n", "Best guess for fill in the blank: person who is\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: Two, the ___ ___ ___ hen\n", "Original Sentence replaced with mask: Two, the hen\n", "\n", "\n", "Mask 1 Guesses : ['mother', 'm', 'f', 'hen', 'n']\n", "Mask 2 Guesses : ['of', '-', 'and', \"'s\", ',']\n", "Mask 3 Guesses : ['the', 'a', 'mother', 'he', 'house']\n", "\n", "Best guess for fill in the blank: mother of the\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Original Sentence: Four, the ___ ___ ___ wren\n", "Original Sentence replaced with mask: Four, the wren\n", "\n", "\n", "Mask 1 Guesses : ['m', 'great', 'k', 'last', 'n']\n", "Mask 2 Guesses : ['of', '-', 'and', ',', 'th']\n", "Mask 3 Guesses : ['the', 'of', 'a', 'ian', \"'s\"]\n", "\n", "Best guess for fill in the blank: m of the\n", "Original Sentence: Have the same ___ ___ ___ beard\n", "Original Sentence replaced with mask: Have the same beard\n", "\n", "\n", "Mask 1 Guesses : ['dark', 'hair', 'full', 'long', 'gray']\n", "Mask 2 Guesses : ['-', 'and', 'as', 'but', ',']\n", "Mask 3 Guesses : ['your', 'and', 'a', 'no', 'white']\n", "\n", "Best guess for fill in the blank: dark - your\n", "\n", "\n", "Generated 5 limericks: \n", "\n", "That is why you will be told\n", "Had the same font , in bold\n", "Not the only one - the woodchuck\n", "But the most important is the truck\n", "That is why I built the road\n", "\n", "There is no way to in Nice\n", "Who is the master of elbow grease\n", "She, who lived in the house\n", "Tormenting the life of the spouse\n", "Tilted by , of peace\n", "\n", "Relentless, focused - meet deadlines\n", "This is a list of the lines\n", "First, the most - the edits\n", "and, in addition , the credits\n", "then, the next of red wine\n", "\n", "There is no cure for Christopher Lynn\n", "Who is the person who is thin\n", "That is why I was was essayed\n", "To the next level , more lemonade\n", "She, who is a interested in\n", "\n", "There is no reason for a beard\n", "Who is the person who is feared\n", "Two, the mother of the hen\n", "Four, the m of the wren\n", "Have the same dark - your beard\n", "\n" ] } ] } ] }