{
 "cells": [
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T11:43:54.331614Z",
     "start_time": "2025-09-17T11:43:41.962471Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from datasets import load_dataset\n",
    "import random\n",
    "\n",
    "\n",
    "# Download QASPER dataset from HuggingFace https://huggingface.co/datasets/allenai/qasper\n",
    "dataset = load_dataset(\"allenai/qasper\")\n",
    "\n",
    "# Split the dataset into train, validation, and test splits\n",
    "train_dataset = dataset[\"train\"]\n",
    "validation_dataset = dataset[\"validation\"]\n",
    "test_dataset = dataset[\"test\"]\n",
    "\n",
    "random.seed(42)  # Set a random seed for reproducibility\n",
    "\n",
    "# Randomly sample 800 rows from the training split\n",
    "train_sampled_indices = random.sample(range(len(train_dataset)), 800)\n",
    "train_samples = [train_dataset[i] for i in train_sampled_indices]\n",
    "\n",
    "\n",
     "# Randomly sample 80 rows from the test split\n",
    "test_sampled_indices = random.sample(range(len(test_dataset)), 80)\n",
    "test_samples = [test_dataset[i] for i in test_sampled_indices]\n",
    "\n",
    "# Now we have 800 research papers for training and 80 research papers to evaluate on"
   ],
   "id": "2627ff88f5ab7150",
   "outputs": [],
   "execution_count": 1
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T11:46:48.324409Z",
     "start_time": "2025-09-17T11:46:20.613414Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from llama_index.core import Settings\n",
     "import os  # needed in this cell; do not rely on a later cell's import\n",
     "# SECURITY: never hardcode API keys in a notebook. Export DEEPSEEK_API_KEY\n",
     "# in your shell (or a .env file) before launching Jupyter.\n",
     "assert 'DEEPSEEK_API_KEY' in os.environ, 'Set DEEPSEEK_API_KEY before running'\n",
    "from llama_index.llms.deepseek import DeepSeek\n",
    "from llama_index.core.embeddings import resolve_embed_model\n",
    "\n",
    "base_embed_model = resolve_embed_model(\"local:D:/pythonProject17/transformers/model_em/BAAI/bge-small-en-v1.5\")\n",
    "llm=DeepSeek(model='deepseek-chat')\n",
    "\n",
    "Settings.llm=llm\n",
    "Settings.embed_model=base_embed_model"
   ],
   "id": "8c0d1e7245de7723",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:sentence_transformers.SentenceTransformer:Load pretrained SentenceTransformer: D:/pythonProject17/transformers/model_em/BAAI/bge-small-en-v1.5\n",
      "Load pretrained SentenceTransformer: D:/pythonProject17/transformers/model_em/BAAI/bge-small-en-v1.5\n"
     ]
    }
   ],
   "execution_count": 11
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T11:43:54.458195Z",
     "start_time": "2025-09-17T11:43:54.356616Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Get full text paper data , questions on the paper from training samples of QASPER to generate training dataset for cross-encoder finetuning\n",
    "from typing import List\n",
    "\n",
    "\n",
    "# Utility function to get full-text of the research papers from the dataset\n",
    "def get_full_text(sample: dict) -> str:\n",
    "    \"\"\"\n",
    "    :param dict sample: the row sample from QASPER\n",
    "    \"\"\"\n",
    "    title = sample[\"title\"]\n",
    "    abstract = sample[\"abstract\"]\n",
    "    sections_list = sample[\"full_text\"][\"section_name\"]\n",
    "    paragraph_list = sample[\"full_text\"][\"paragraphs\"]\n",
    "    combined_sections_with_paras = \"\"\n",
    "    if len(sections_list) == len(paragraph_list):\n",
    "        combined_sections_with_paras += title + \"\\t\"\n",
    "        combined_sections_with_paras += abstract + \"\\t\"\n",
    "        for index in range(0, len(sections_list)):\n",
    "            combined_sections_with_paras += str(sections_list[index]) + \"\\t\"\n",
    "            combined_sections_with_paras += \"\".join(paragraph_list[index])\n",
    "        return combined_sections_with_paras\n",
    "\n",
    "    else:\n",
    "        print(\"Not the same number of sections as paragraphs list\")\n",
    "\n",
    "\n",
    "# utility function to extract list of questions from the dataset\n",
    "def get_questions(sample: dict) -> List[str]:\n",
    "    \"\"\"\n",
    "    :param dict sample: the row sample from QASPER\n",
    "    \"\"\"\n",
    "    questions_list = sample[\"qas\"][\"question\"]\n",
    "    return questions_list\n",
    "\n",
    "\n",
    "doc_qa_dict_list = []\n",
    "\n",
    "for train_sample in train_samples:\n",
    "    full_text = get_full_text(train_sample)\n",
    "    questions_list = get_questions(train_sample)\n",
    "    local_dict = {\"paper\": full_text, \"questions\": questions_list}\n",
    "    doc_qa_dict_list.append(local_dict)"
   ],
   "id": "c4c1b53e87bcb635",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T11:43:54.506196Z",
     "start_time": "2025-09-17T11:43:54.483195Z"
    }
   },
   "cell_type": "code",
   "source": "len(doc_qa_dict_list)",
   "id": "72495bbbdfbe0ba7",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "800"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T11:43:55.412623Z",
     "start_time": "2025-09-17T11:43:54.651194Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Save training data as a csv\n",
    "import pandas as pd\n",
    "\n",
    "df_train = pd.DataFrame(doc_qa_dict_list)\n",
    "df_train.to_csv(\"train.csv\")"
   ],
   "id": "2fa43fd339fe9559",
   "outputs": [],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T11:43:55.459622Z",
     "start_time": "2025-09-17T11:43:55.424624Z"
    }
   },
   "cell_type": "code",
   "source": "df_train",
   "id": "4f392718c92a9950",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "                                                 paper  \\\n",
       "0    Deeper Task-Specificity Improves Joint Entity ...   \n",
       "1    From FiLM to Video: Multi-turn Question Answer...   \n",
       "2    Enriching BERT with Knowledge Graph Embeddings...   \n",
       "3    Why Do Urban Legends Go Viral?\\tUrban legends ...   \n",
       "4    A Resource for Studying Chatino Verbal Morphol...   \n",
       "..                                                 ...   \n",
       "795  BLEURT: Learning Robust Metrics for Text Gener...   \n",
       "796  Attention Is (not) All You Need for Commonsens...   \n",
       "797  Neural Factor Graph Models for Cross-lingual M...   \n",
       "798  VAIS Hate Speech Detection System: A Deep Lear...   \n",
       "799  A Joint Model for Question Answering and Quest...   \n",
       "\n",
       "                                             questions  \n",
       "0    [Do they repot results only on English data?, ...  \n",
       "1    [At which interval do they extract video and a...  \n",
       "2    [By how much do they outperform standard BERT?...  \n",
       "3            [How accurate is their predictive model?]  \n",
       "4    [How does morphological analysis differ from m...  \n",
       "..                                                 ...  \n",
       "795        [How are the synthetic examples generated?]  \n",
       "796  [Which datasets do they evaluate on?, How does...  \n",
       "797  [What other cross-lingual approaches is the mo...  \n",
       "798  [What was the baseline?, Is the data all in Vi...  \n",
       "799  [Which components of QA and QG models are shar...  \n",
       "\n",
       "[800 rows x 2 columns]"
      ],
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>paper</th>\n",
       "      <th>questions</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>Deeper Task-Specificity Improves Joint Entity ...</td>\n",
       "      <td>[Do they repot results only on English data?, ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>From FiLM to Video: Multi-turn Question Answer...</td>\n",
       "      <td>[At which interval do they extract video and a...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>Enriching BERT with Knowledge Graph Embeddings...</td>\n",
       "      <td>[By how much do they outperform standard BERT?...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>Why Do Urban Legends Go Viral?\\tUrban legends ...</td>\n",
       "      <td>[How accurate is their predictive model?]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>A Resource for Studying Chatino Verbal Morphol...</td>\n",
       "      <td>[How does morphological analysis differ from m...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>795</th>\n",
       "      <td>BLEURT: Learning Robust Metrics for Text Gener...</td>\n",
       "      <td>[How are the synthetic examples generated?]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>796</th>\n",
       "      <td>Attention Is (not) All You Need for Commonsens...</td>\n",
       "      <td>[Which datasets do they evaluate on?, How does...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>797</th>\n",
       "      <td>Neural Factor Graph Models for Cross-lingual M...</td>\n",
       "      <td>[What other cross-lingual approaches is the mo...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>798</th>\n",
       "      <td>VAIS Hate Speech Detection System: A Deep Lear...</td>\n",
       "      <td>[What was the baseline?, Is the data all in Vi...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>799</th>\n",
       "      <td>A Joint Model for Question Answering and Quest...</td>\n",
       "      <td>[Which components of QA and QG models are shar...</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>800 rows × 2 columns</p>\n",
       "</div>"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T11:43:55.507623Z",
     "start_time": "2025-09-17T11:43:55.478622Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Get evaluation data papers , questions and answers\n",
    "\"\"\"\n",
    "The Answers field in the dataset follow the below format:-\n",
    "Unanswerable answers have \"unanswerable\" set to true.\n",
    "\n",
    "The remaining answers have exactly one of the following fields being non-empty.\n",
    "\n",
    "\"extractive_spans\" are spans in the paper which serve as the answer.\n",
    "\"free_form_answer\" is a written out answer.\n",
    "\"yes_no\" is true iff the answer is Yes, and false iff the answer is No.\n",
    "\n",
     "We accept only free-form answers and for all other kinds of answers we set their value to 'Unacceptable',\n",
    "to better evaluate the performance of the query engine using pairwise comparison evaluator as it uses GPT-4 which is biased towards preferring long answers more.\n",
    "https://www.anyscale.com/blog/a-comprehensive-guide-for-building-rag-based-llm-applications-part-1\n",
    "\n",
    "So in the case of 'yes_no' answers it can favour Query Engine answers more than reference answers.\n",
    "Also in the case of extracted spans it can favour reference answers more than Query engine generated answers.\n",
    "\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "eval_doc_qa_answer_list = []\n",
    "\n",
    "\n",
    "# Utility function to extract answers from the dataset\n",
    "def get_answers(sample: dict) -> List[str]:\n",
    "    \"\"\"\n",
    "    :param dict sample: the row sample from the train split of QASPER\n",
    "    \"\"\"\n",
    "    final_answers_list = []\n",
    "    answers = sample[\"qas\"][\"answers\"]\n",
    "    for answer in answers:\n",
    "        local_answer = \"\"\n",
    "        types_of_answers = answer[\"answer\"][0]\n",
    "        if types_of_answers[\"unanswerable\"] == False:\n",
    "            if types_of_answers[\"free_form_answer\"] != \"\":\n",
    "                local_answer = types_of_answers[\"free_form_answer\"]\n",
    "            else:\n",
    "                local_answer = \"Unacceptable\"\n",
    "        else:\n",
    "            local_answer = \"Unacceptable\"\n",
    "\n",
    "        final_answers_list.append(local_answer)\n",
    "\n",
    "    return final_answers_list\n",
    "\n",
    "\n",
    "for test_sample in test_samples:\n",
    "    full_text = get_full_text(test_sample)\n",
    "    questions_list = get_questions(test_sample)\n",
    "    answers_list = get_answers(test_sample)\n",
    "    local_dict = {\n",
    "        \"paper\": full_text,\n",
    "        \"questions\": questions_list,\n",
    "        \"answers\": answers_list,\n",
    "    }\n",
    "    eval_doc_qa_answer_list.append(local_dict)"
   ],
   "id": "32dfdcd40385d9ad",
   "outputs": [],
   "execution_count": 6
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T11:43:55.539622Z",
     "start_time": "2025-09-17T11:43:55.519622Z"
    }
   },
   "cell_type": "code",
   "source": "len(eval_doc_qa_answer_list)",
   "id": "a25a4c6bf5ce2773",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "80"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 7
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T11:43:55.664664Z",
     "start_time": "2025-09-17T11:43:55.583626Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Save eval data as a csv\n",
    "import pandas as pd\n",
    "\n",
    "df_test = pd.DataFrame(eval_doc_qa_answer_list)\n",
    "df_test.to_csv(\"test.csv\")\n",
    "\n",
    "# The Rag Eval test data can be found at the below dropbox link\n",
    "# https://www.dropbox.com/scl/fi/3lmzn6714oy358mq0vawm/test.csv?rlkey=yz16080te4van7fvnksi9kaed&dl=0"
   ],
   "id": "1fd2c7cf7ba5b35c",
   "outputs": [],
   "execution_count": 8
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T11:44:13.652542Z",
     "start_time": "2025-09-17T11:43:55.677623Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# Generate the respective training dataset from the initial train data collected from QASPER in the format required by the CrossEncoderFinetuneEngine\n",
    "import os\n",
    "from llama_index.core import SimpleDirectoryReader\n",
    "# import openai\n",
    "from llama_index.finetuning.cross_encoders.dataset_gen import (\n",
    "    generate_ce_fine_tuning_dataset,\n",
    "    generate_synthetic_queries_over_documents,\n",
    ")\n",
    "\n",
    "from llama_index.finetuning.cross_encoders import CrossEncoderFinetuneEngine\n",
    "\n",
    "# os.environ[\"OPENAI_API_KEY\"] = \"sk-\"\n",
    "# openai.api_key = os.environ[\"OPENAI_API_KEY\"]"
   ],
   "id": "cb46b7c8563e1c4e",
   "outputs": [],
   "execution_count": 9
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T12:11:33.973443Z",
     "start_time": "2025-09-17T12:01:41.389883Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from llama_index.core import Document\n",
    "\n",
     "# Process only the first 10 papers to keep runtime and API cost manageable\n",
    "papers_to_process = doc_qa_dict_list[:10]\n",
    "\n",
    "final_finetuning_data_list = []\n",
     "for paper in papers_to_process:  # iterate over the sampled subset, not the full list\n",
    "    questions_list = paper[\"questions\"]\n",
    "    documents = [Document(text=paper[\"paper\"])]\n",
    "\n",
     "    print(f\"Processing paper with {len(questions_list)} questions...\")  # progress indicator\n",
    "\n",
    "    local_finetuning_dataset = generate_ce_fine_tuning_dataset(\n",
    "        documents=documents,\n",
    "        questions_list=questions_list,\n",
    "        max_chunk_length=256,\n",
    "        top_k=5,\n",
     "        llm=llm  # make sure `llm` is defined (set in the DeepSeek settings cell above)\n",
    "    )\n",
    "    final_finetuning_data_list.extend(local_finetuning_dataset)\n",
    "\n",
    "print(f\"\\nProcessing finished. Generated {len(final_finetuning_data_list)} training samples from 10 papers.\")"
   ],
   "id": "62352548cfb781d2",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Processing paper with 4 questions...\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "  0%|          | 0/4 [00:00<?, ?it/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "58a3d4d923084209b05e1c7302cefdf2"
      }
     },
     "metadata": {},
     "output_type": "display_data",
     "jetTransient": {
      "display_id": null
     }
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "Processing paper with 3 questions...\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "  0%|          | 0/3 [00:00<?, ?it/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "337f5391252444bfac07d79acd9476c3"
      }
     },
     "metadata": {},
     "output_type": "display_data",
     "jetTransient": {
      "display_id": null
     }
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "Processing paper with 3 questions...\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "  0%|          | 0/3 [00:00<?, ?it/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "161754044d064a678910a5be36674a99"
      }
     },
     "metadata": {},
     "output_type": "display_data",
     "jetTransient": {
      "display_id": null
     }
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "Processing paper with 1 questions...\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "  0%|          | 0/1 [00:00<?, ?it/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "4351e778f15f41468a4be47328d10d48"
      }
     },
     "metadata": {},
     "output_type": "display_data",
     "jetTransient": {
      "display_id": null
     }
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "Processing paper with 7 questions...\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "  0%|          | 0/7 [00:00<?, ?it/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "7766b929106044f083038ec30b0c3041"
      }
     },
     "metadata": {},
     "output_type": "display_data",
     "jetTransient": {
      "display_id": null
     }
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "Processing paper with 2 questions...\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "  0%|          | 0/2 [00:00<?, ?it/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "535f2db2a6f1414390ef90c84d0193dc"
      }
     },
     "metadata": {},
     "output_type": "display_data",
     "jetTransient": {
      "display_id": null
     }
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "Processing paper with 5 questions...\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "  0%|          | 0/5 [00:00<?, ?it/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "05bda253e24c47799e30695d0b6ea6bf"
      }
     },
     "metadata": {},
     "output_type": "display_data",
     "jetTransient": {
      "display_id": null
     }
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "Processing paper with 1 questions...\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "  0%|          | 0/1 [00:00<?, ?it/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "83ffc4158d59498a8904cd0f2f1764b0"
      }
     },
     "metadata": {},
     "output_type": "display_data",
     "jetTransient": {
      "display_id": null
     }
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "Processing paper with 4 questions...\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "  0%|          | 0/4 [00:00<?, ?it/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "1def7e4099b649b4887419fcf5d0bfce"
      }
     },
     "metadata": {},
     "output_type": "display_data",
     "jetTransient": {
      "display_id": null
     }
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "Processing paper with 5 questions...\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "  0%|          | 0/5 [00:00<?, ?it/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "f49b88570a314cdab84e4e7455ea341e"
      }
     },
     "metadata": {},
     "output_type": "display_data",
     "jetTransient": {
      "display_id": null
     }
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "\n",
      "Processing finished. Generated 175 training samples from 10 papers.\n"
     ]
    }
   ],
   "execution_count": 17
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T12:27:14.306985Z",
     "start_time": "2025-09-17T12:27:14.232981Z"
    }
   },
   "cell_type": "code",
   "source": "final_finetuning_data_list",
   "id": "44868db3e195d509",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[CrossEncoderFinetuningDatasetSample(query='Do they repot results only on English data?', context='addition to precision, recall, and F1 scores for both tasks, we show the average of the F1 scores across both tasks. On the ADE dataset, we achieve SOTA results for both the NER and RE tasks. On the CoNLL04 dataset, we achieve SOTA results on the NER task, while our performance on the RE task is competitive with other recent models. On both datasets, we achieve SOTA results when considering the average F1 score across both tasks. The largest gain relative to the previous SOTA performance is on the RE task of the ADE dataset, where we see an absolute improvement of 4.5 on the macro-average F1 score.While the model of Eberts and Ulges eberts2019span outperforms our proposed architecture on the CoNLL04 RE task, their results come at the cost of greater model complexity. As mentioned above, Eberts and Ulges fine-tune the BERTBASE model, which has 110 million trainable parameters. In contrast, given the hyperparameters used for final training on the CoNLL04 dataset, our proposed architecture has approximately 6 million trainable parameters.The fact that the optimal number of task-specific layers differed between the two datasets demonstrates the', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='Do they repot results only on English data?', context=\"which contains 910 training, 243 dev, and 288 test sentences. All hyperparameters are tuned against the dev set. Final results are obtained by averaging results from five trials with random weight initializations in which we trained on the combined training and dev sets and evaluated on the test set. As previous work using the CoNLL04 dataset has reported both micro- and macro-averages, we report both sets of metrics.In evaluating NER performance on these datasets, a predicted entity is only considered a true positive if both the entity's span and span type are correctly predicted. In evaluating RE performance, we follow previous work in adopting a strict evaluation method wherein a predicted relation is only considered correct if the spans corresponding to the two arguments of this relation and the entity types of these spans are also predicted correctly. We experimented with LSTMs and GRUs for all BiRNN layers in the model and experimented with using $1-3$ shared BiRNN layers and $0-3$ task-specific BiRNN layers for each task. Hyperparameters used for final training are listed in Table TABREF17.Experiments ::: Results\\tFull results for the performance of our model, as well as other recent work, are shown in Table TABREF18. In\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Do they repot results only on English data?', context='pre-trained BERT model that is fine-tuned during training. Representations for candidate entity spans are obtained by max pooling over all tokens in each span. Span representations are passed through an entity classification layer to solve the NER task. Representations of all pairs of spans that are predicted to be entities and representations of the contexts between these pairs are then passed through a final layer with sigmoid activation to predict relations between entities. With respect to their degrees of task-specificity, these span-based approaches resemble the BIO/BILOU approaches in which the majority of model parameters are shared, but each task possesses independent scoring and/or output layers.Overall, previous approaches to joint NER and RE have experimented little with deep task-specificity, with the exception of those models that include additional layers for the RE task. To our knowledge, no work has considered including additional NER-specific layers beyond scoring and/or output layers. This may reflect a residual influence of the pipeline approach in which the NER task must be solved first before additional layers are used to solve the RE task. However, there is no a priori reason to think that the RE task would benefit more from additional task-specific layers than the NER task. We also note that while previous work has tackled joint NER', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='Do they repot results only on English data?', context='types (Adverse-Effect and Drug) and a single relation type (Adverse-Effect). Of the entity instances in the dataset, 120 overlap with other entities. Similar to prior work using BIO/BILOU tagging, we remove overlapping entities. We preserve the entity with the longer span and remove any relations involving a removed entity.There are no official training, dev, and test splits for the ADE dataset, leading previous researchers to use some form of cross-validation when evaluating their models on this dataset. We split out 10% of the data to use as a held-out dev set. Final results are obtained via 10-fold cross-validation using the remaining 90% of the data and the hyperparameters obtained from tuning on the dev set. Following previous work, we report macro-averaged performance metrics averaged across each of the 10 folds.Experiments ::: CoNLL04\\tThe CoNLL04 dataset BIBREF7 consists of 1,441 sentences from news articles annotated with four entity types (Location, Organization, People, and Other) and five relation types (Works-For, Kill, Organization-Based-In, Lives-In, and Located-In). This dataset contains no overlapping entities.We use the three-way split of BIBREF16,', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Do they repot results only on English data?', context='value of taking the number of shared and task-specific layers to be a hyperparameter of our model architecture. As shown in Table TABREF17, the final hyperparameters used for the CoNLL04 dataset included an additional RE-specific BiRNN layer than did the final hyperparameters used for the ADE dataset. We suspect that this is due to the limited number of relations and entities in the ADE dataset. For most examples in this dataset, it is sufficient to correctly identify a single Drug entity, a single Adverse-Effect entity, and an Adverse-Effect relation between the two entities. Thus, the NER and RE tasks for this dataset are more closely related than they are in the case of the CoNLL04 dataset. Intuitively, cases in which the NER and RE problems can be solved by relying on more shared information should require fewer task-specific layers.Experiments ::: Ablation Study\\tTo further demonstrate the effectiveness of the additional task-specific BiRNN layers in our architecture, we conducted an ablation study using the CoNLL04 dataset. We trained and evaluated in the same manner described above, using the same hyperparameters, with the following exceptions:We used either (i) zero NER-specific BiRNN', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What were the variables in the ablation study?', context='value of taking the number of shared and task-specific layers to be a hyperparameter of our model architecture. As shown in Table TABREF17, the final hyperparameters used for the CoNLL04 dataset included an additional RE-specific BiRNN layer than did the final hyperparameters used for the ADE dataset. We suspect that this is due to the limited number of relations and entities in the ADE dataset. For most examples in this dataset, it is sufficient to correctly identify a single Drug entity, a single Adverse-Effect entity, and an Adverse-Effect relation between the two entities. Thus, the NER and RE tasks for this dataset are more closely related than they are in the case of the CoNLL04 dataset. Intuitively, cases in which the NER and RE problems can be solved by relying on more shared information should require fewer task-specific layers.Experiments ::: Ablation Study\\tTo further demonstrate the effectiveness of the additional task-specific BiRNN layers in our architecture, we conducted an ablation study using the CoNLL04 dataset. We trained and evaluated in the same manner described above, using the same hyperparameters, with the following exceptions:We used either (i) zero NER-specific BiRNN', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What were the variables in the ablation study?', context='layers, (ii) zero RE-specific BiRNN layers, or (iii) zero task-specific BiRNN layers of any kind.We increased the number of shared BiRNN layers to keep the total number of model parameters consistent with the number of parameters in the baseline model.We average the results for each set of hyperparameter across three trials with random weight initializations.Table TABREF26 contains the results from the ablation study. These results show that the proposed architecture benefits from the inclusion of both NER- and RE-specific layers. However, the RE task benefits much more from the inclusion of these task-specific layers than does the NER task. We take this to reflect the fact that the RE task is more difficult than the NER task for the CoNLL04 dataset, and therefore benefits the most from its own task-specific layers. This is consistent with the fact that the hyperparameter setting that performs best on the RE task is that with no NER-specific BiRNN layers, i.e. the setting that retained RE-specific BiRNN layers. In contrast, the inclusion of task-specific BiRNN layers of any kind had relatively little impact on the performance on the NER task.Note that the setting with no NER-specific layers is somewhat similar to', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What were the variables in the ablation study?', context='the cross-entropy loss for the NER and RE outputs, respectively, then the total model loss is given by $\\\\mathcal {L} = \\\\mathcal {L}_{NER} + \\\\lambda ^r \\\\mathcal {L}_{RE}$. The weight $\\\\lambda ^r$ is treated as a hyperparameter and allows for tuning the relative importance of the NER and RE tasks during training. Final training for both datasets used a value of 5 for $\\\\lambda ^r$.For the ADE dataset, we trained using the Adam optimizer with a mini-batch size of 16. For the CoNLL04 dataset, we used the Nesterov Adam optimizer with and a mini-batch size of 2. For both datasets, we used a learning rate of $5\\\\times 10^{-4}$, During training, dropout was applied before each BiRNN layer, other than the character BiRNN layer, and before the RE scoring layer.Experiments\\tWe evaluate the architecture described above using the following two publicly available datasets.Experiments ::: ADE\\tThe Adverse Drug Events (ADE) dataset BIBREF6 consists of 4,272 sentences describing adverse effects from the use of particular drugs. The text is annotated using two entity', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='What were the variables in the ablation study?', context='addition to precision, recall, and F1 scores for both tasks, we show the average of the F1 scores across both tasks. On the ADE dataset, we achieve SOTA results for both the NER and RE tasks. On the CoNLL04 dataset, we achieve SOTA results on the NER task, while our performance on the RE task is competitive with other recent models. On both datasets, we achieve SOTA results when considering the average F1 score across both tasks. The largest gain relative to the previous SOTA performance is on the RE task of the ADE dataset, where we see an absolute improvement of 4.5 on the macro-average F1 score.While the model of Eberts and Ulges eberts2019span outperforms our proposed architecture on the CoNLL04 RE task, their results come at the cost of greater model complexity. As mentioned above, Eberts and Ulges fine-tune the BERTBASE model, which has 110 million trainable parameters. In contrast, given the hyperparameters used for final training on the CoNLL04 dataset, our proposed architecture has approximately 6 million trainable parameters.The fact that the optimal number of task-specific layers differed between the two datasets demonstrates the', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='What were the variables in the ablation study?', context='types (Adverse-Effect and Drug) and a single relation type (Adverse-Effect). Of the entity instances in the dataset, 120 overlap with other entities. Similar to prior work using BIO/BILOU tagging, we remove overlapping entities. We preserve the entity with the longer span and remove any relations involving a removed entity.There are no official training, dev, and test splits for the ADE dataset, leading previous researchers to use some form of cross-validation when evaluating their models on this dataset. We split out 10% of the data to use as a held-out dev set. Final results are obtained via 10-fold cross-validation using the remaining 90% of the data and the hyperparameters obtained from tuning on the dev set. Following previous work, we report macro-averaged performance metrics averaged across each of the 10 folds.Experiments ::: CoNLL04\\tThe CoNLL04 dataset BIBREF7 consists of 1,441 sentences from news articles annotated with four entity types (Location, Organization, People, and Other) and five relation types (Works-For, Kill, Organization-Based-In, Lives-In, and Located-In). This dataset contains no overlapping entities.We use the three-way split of BIBREF16,', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How many shared layers are in the system?', context='layers, (ii) zero RE-specific BiRNN layers, or (iii) zero task-specific BiRNN layers of any kind.We increased the number of shared BiRNN layers to keep the total number of model parameters consistent with the number of parameters in the baseline model.We average the results for each set of hyperparameter across three trials with random weight initializations.Table TABREF26 contains the results from the ablation study. These results show that the proposed architecture benefits from the inclusion of both NER- and RE-specific layers. However, the RE task benefits much more from the inclusion of these task-specific layers than does the NER task. We take this to reflect the fact that the RE task is more difficult than the NER task for the CoNLL04 dataset, and therefore benefits the most from its own task-specific layers. This is consistent with the fact that the hyperparameter setting that performs best on the RE task is that with no NER-specific BiRNN layers, i.e. the setting that retained RE-specific BiRNN layers. In contrast, the inclusion of task-specific BiRNN layers of any kind had relatively little impact on the performance on the NER task.Note that the setting with no NER-specific layers is somewhat similar to', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How many shared layers are in the system?', context=\"the setup of Nguyen and Verspoor's nguyen2019end model, but includes an additional shared and an additional RE-specific layer. That this setting outperforms Nguyen et al.'s model reflects the contribution of having deeper shared and RE-specific layers, separate from the contribution of NER-specific layers.Conclusion\\tOur results demonstrate the utility of using deeper task-specificity in models for joint NER and RE and of tuning the level of task-specificity separately for different datasets. We conclude that prior work on joint NER and RE undervalues the importance of task-specificity. More generally, these results underscore the importance of correctly balancing the number of shared and task-specific parameters in MTL.We note that other approaches that employ a single model architecture across different datasets are laudable insofar as we should prefer models that can generalize well across domains with little domain-specific hyperparameter tuning. On the other hand, the similarity between the NER and RE tasks varies across domains, and improved performance can be achieved on these tasks by tuning the number of shared and task-specific parameters. In our work, we treated the number of shared and task-specific layers as a hyperparameter to be tuned for each dataset, but future work may explore ways to select this aspect of the\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How many shared layers are in the system?', context='value of taking the number of shared and task-specific layers to be a hyperparameter of our model architecture. As shown in Table TABREF17, the final hyperparameters used for the CoNLL04 dataset included an additional RE-specific BiRNN layer than did the final hyperparameters used for the ADE dataset. We suspect that this is due to the limited number of relations and entities in the ADE dataset. For most examples in this dataset, it is sufficient to correctly identify a single Drug entity, a single Adverse-Effect entity, and an Adverse-Effect relation between the two entities. Thus, the NER and RE tasks for this dataset are more closely related than they are in the case of the CoNLL04 dataset. Intuitively, cases in which the NER and RE problems can be solved by relying on more shared information should require fewer task-specific layers.Experiments ::: Ablation Study\\tTo further demonstrate the effectiveness of the additional task-specific BiRNN layers in our architecture, we conducted an ablation study using the CoNLL04 dataset. We trained and evaluated in the same manner described above, using the same hyperparameters, with the following exceptions:We used either (i) zero NER-specific BiRNN', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How many shared layers are in the system?', context=\"and RE in variety of textual domains, in all cases the number of shared and task-specific parameters is held constant across these domains.Model\\tThe architecture proposed here is inspired by several previous proposals BIBREF10, BIBREF11, BIBREF12. We treat the NER task as a sequence labeling problem using BIO labels. Token representations are first passed through a series of shared, BiRNN layers. Stacked on top of these shared BiRNN layers is a sequence of task-specific BiRNN layers for both the NER and RE tasks. We take the number of shared and task-specific layers to be a hyperparameter of the model. Both sets of task-specific BiRNN layers are followed by task-specific scoring and output layers. Figure FIGREF4 illustrates this architecture. Below, we use superscript $e$ for NER-specific variables and layers and superscript $r$ for RE-specific variables and layers.Model ::: Shared Layers\\tWe obtain contextual token embeddings using the pre-trained ELMo 5.5B model BIBREF13. For each token in the input text $t_i$, this model returns three vectors, which we combine via a weighted averaging layer. Each token $t_i$'s weighted ELMo embedding\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How many shared layers are in the system?', context='Bekoulis et al. bekoulis2018joint propose models in which token representations first pass through one or more shared BiLSTM layers. Katiyar and Cardie use a softmax layer to tag tokens with BILOU tags to solve the NER task and use an attention layer to detect relations between each pair of entities. Bekoulis et al., following Lample et al. Lample2016, use a conditional random field (CRF) layer to produce BIO tags for the NER task. The output from the shared BiLSTM layer for every pair of tokens is passed through relation scoring and sigmoid layers to predict relations.A second method of incorporating greater task-specificity into these models is via deeper layers for solving the RE task. Miwa and Bansal miwa-bansal-2016-end and Li et al. li2017neural pass token representations through a BiLSTM layer and then use a softmax layer to label each token with the appropriate BILOU label. Both proposals then use a type of tree-structured bidirectional LSTM layer stacked on top of the shared BiLSTM to solve the RE task. Nguyen and Verspoor nguyen2019end use BiLSTM and CRF layers to perform the', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How many additional task-specific layers are introduced?', context='layers, (ii) zero RE-specific BiRNN layers, or (iii) zero task-specific BiRNN layers of any kind.We increased the number of shared BiRNN layers to keep the total number of model parameters consistent with the number of parameters in the baseline model.We average the results for each set of hyperparameter across three trials with random weight initializations.Table TABREF26 contains the results from the ablation study. These results show that the proposed architecture benefits from the inclusion of both NER- and RE-specific layers. However, the RE task benefits much more from the inclusion of these task-specific layers than does the NER task. We take this to reflect the fact that the RE task is more difficult than the NER task for the CoNLL04 dataset, and therefore benefits the most from its own task-specific layers. This is consistent with the fact that the hyperparameter setting that performs best on the RE task is that with no NER-specific BiRNN layers, i.e. the setting that retained RE-specific BiRNN layers. In contrast, the inclusion of task-specific BiRNN layers of any kind had relatively little impact on the performance on the NER task.Note that the setting with no NER-specific layers is somewhat similar to', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How many additional task-specific layers are introduced?', context=\"the setup of Nguyen and Verspoor's nguyen2019end model, but includes an additional shared and an additional RE-specific layer. That this setting outperforms Nguyen et al.'s model reflects the contribution of having deeper shared and RE-specific layers, separate from the contribution of NER-specific layers.Conclusion\\tOur results demonstrate the utility of using deeper task-specificity in models for joint NER and RE and of tuning the level of task-specificity separately for different datasets. We conclude that prior work on joint NER and RE undervalues the importance of task-specificity. More generally, these results underscore the importance of correctly balancing the number of shared and task-specific parameters in MTL.We note that other approaches that employ a single model architecture across different datasets are laudable insofar as we should prefer models that can generalize well across domains with little domain-specific hyperparameter tuning. On the other hand, the similarity between the NER and RE tasks varies across domains, and improved performance can be achieved on these tasks by tuning the number of shared and task-specific parameters. In our work, we treated the number of shared and task-specific layers as a hyperparameter to be tuned for each dataset, but future work may explore ways to select this aspect of the\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How many additional task-specific layers are introduced?', context=\"and RE in variety of textual domains, in all cases the number of shared and task-specific parameters is held constant across these domains.Model\\tThe architecture proposed here is inspired by several previous proposals BIBREF10, BIBREF11, BIBREF12. We treat the NER task as a sequence labeling problem using BIO labels. Token representations are first passed through a series of shared, BiRNN layers. Stacked on top of these shared BiRNN layers is a sequence of task-specific BiRNN layers for both the NER and RE tasks. We take the number of shared and task-specific layers to be a hyperparameter of the model. Both sets of task-specific BiRNN layers are followed by task-specific scoring and output layers. Figure FIGREF4 illustrates this architecture. Below, we use superscript $e$ for NER-specific variables and layers and superscript $r$ for RE-specific variables and layers.Model ::: Shared Layers\\tWe obtain contextual token embeddings using the pre-trained ELMo 5.5B model BIBREF13. For each token in the input text $t_i$, this model returns three vectors, which we combine via a weighted averaging layer. Each token $t_i$'s weighted ELMo embedding\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How many additional task-specific layers are introduced?', context='value of taking the number of shared and task-specific layers to be a hyperparameter of our model architecture. As shown in Table TABREF17, the final hyperparameters used for the CoNLL04 dataset included an additional RE-specific BiRNN layer than did the final hyperparameters used for the ADE dataset. We suspect that this is due to the limited number of relations and entities in the ADE dataset. For most examples in this dataset, it is sufficient to correctly identify a single Drug entity, a single Adverse-Effect entity, and an Adverse-Effect relation between the two entities. Thus, the NER and RE tasks for this dataset are more closely related than they are in the case of the CoNLL04 dataset. Intuitively, cases in which the NER and RE problems can be solved by relying on more shared information should require fewer task-specific layers.Experiments ::: Ablation Study\\tTo further demonstrate the effectiveness of the additional task-specific BiRNN layers in our architecture, we conducted an ablation study using the CoNLL04 dataset. We trained and evaluated in the same manner described above, using the same hyperparameters, with the following exceptions:We used either (i) zero NER-specific BiRNN', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How many additional task-specific layers are introduced?', context='Deeper Task-Specificity Improves Joint Entity and Relation Extraction\\tMulti-task learning (MTL) is an effective method for learning related tasks, but designing MTL models necessitates deciding which and how many parameters should be task-specific, as opposed to shared between tasks. We investigate this issue for the problem of jointly learning named entity recognition (NER) and relation extraction (RE) and propose a novel neural architecture that allows for deeper task-specificity than does prior work. In particular, we introduce additional task-specific bidirectional RNN layers for both the NER and RE tasks and tune the number of shared and task-specific layers separately for different datasets. We achieve state-of-the-art (SOTA) results for both tasks on the ADE dataset; on the CoNLL04 dataset, we achieve SOTA results on the NER task and competitive results on the RE task while using an order of magnitude fewer trainable parameters than the current SOTA architecture. An ablation study confirms the importance of the additional task-specific layers for achieving these results. Our work suggests that previous solutions to joint NER and RE undervalue task-specificity and demonstrates the importance of correctly balancing the number of shared and task-specific parameters for MTL approaches in', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='At which interval do they extract video and audio frames?', context='adds further complexity and challenges. Limited availability of such data is one of the challenges. Apart from the avsd dataset, there does not exist a video dialogue dataset to the best of our knowledge and the avsd data itself is fairly limited in size. Extracting relevant features from videos also contains the inherent complexity of extracting features from individual frames and additionally requires understanding their temporal interaction. The temporal nature of videos also makes it important to be able to focus on a varying-length subset of video frames as the action which is being asked about might be happening within them. There is also the need to encode the additional modality of audio which would be required for answering questions that rely on the audio track. With limited size of publicly available datasets based on the visual modality, learning useful features from high dimensional visual data has been a challenge even for the visdial dataset, and we anticipate this to be an even more significant challenge on the avsd dataset as it involves videos.On the avsd task, BIBREF11 train an attention-based audio-visual scene-aware dialogue model which we use as the baseline model for this paper. They divide each video into multiple equal-duration segments and, from each of them, extract video features using an I3D BIBREF21 model,', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='At which interval do they extract video and audio frames?', context='of multi-modal dialogue is fusing modalities with varying information density. On AVSD, it is easiest to learn from the input text, while video features remain largely opaque to the decoder. modelname uses a generalization of FiLM to video that conditions video feature extraction on a question. However, similar to related work, absolute improvements of incorporating video features into dialogue are consistent but small. Thus, while our results indicate the suitability of our FiLM generalization, they also highlight that applications at the intersection between language and video are currently constrained by the quality of video features, and emphasizes the need for larger datasets.', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='At which interval do they extract video and audio frames?', context='From FiLM to Video: Multi-turn Question Answering with Multi-modal Context\\tUnderstanding audio-visual content and the ability to have an informative conversation about it have both been challenging areas for intelligent systems. The Audio Visual Scene-aware Dialog (AVSD) challenge, organized as a track of the Dialog System Technology Challenge 7 (DSTC7), proposes a combined task, where a system has to answer questions pertaining to a video given a dialogue with previous question-answer pairs and the video itself. We propose for this task a hierarchical encoder-decoder model which computes a multi-modal embedding of the dialogue context. It first embeds the dialogue history using two LSTMs. We extract video and audio frames at regular intervals and compute semantic features using pre-trained I3D and VGGish models, respectively. Before summarizing both modalities into fixed-length vectors using LSTMs, we use FiLM blocks to condition them on the embeddings of the current question, which allows us to reduce the dimensionality considerably. Finally, we use an LSTM decoder that we train with scheduled sampling and evaluate using beam search. Compared to the modality-fusing baseline model released by the AVSD challenge organizers, our model achieves a relative improvements of more than 16%, scoring', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='At which interval do they extract video and audio frames?', context='novel FiLM-based audio-visual feature extractor for videos and an auxiliary multi-task learning-based decoder for decoding a summary of the video. It outperforms the baseline results for the avsd dataset BIBREF11 and was ranked 2nd overall among the dstc7 avsd challenge participants.In Section SECREF2 , we discuss existing literature on end-to-end dialogue systems with a special focus on multi-modal dialogue systems. Section SECREF3 describes the avsd dataset. In Section SECREF4 , we present the architecture of our modelname model. We describe our evaluation and experimental setup in Section SECREF5 and then conclude in Section SECREF6 .Related Work\\tWith the availability of large conversational corpora from sources like Reddit and Twitter, there has been a lot of recent work on end-to-end modelling of dialogue for open domains. BIBREF12 treated dialogue as a machine translation problem where they translate from the stimulus to the response. They observed this to be more challenging than machine translation tasks due the larger diversity of possible responses. Among approaches that just use the previous utterance to generate the current response, BIBREF13 proposed a response generation model based on the encoder decoder framework. BIBREF14 also proposed an encoder-decoder based neural network', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='At which interval do they extract video and audio frames?', context='same code for evaluation metrics as used by BIBREF11 for fairness and comparability. We attribute the significant performance gain of our model over the baseline to a combination of several factors as described below:Our primary architectural differences over the baseline model are: not concatenating the question, answer pairs before encoding them, the auxiliary decoder module, and using the Time-Extended FiLM module for feature extraction. These, combined with using scheduled sampling and running hyperparameter optimization over the validation set to select hyperparameters, give us the observed performance boost.We observe that our models generate fairly relevant responses to questions in the dialogues, and models with audio-visual inputs respond to audio-visual questions (e.g. “is there any voices or music ?”) correctly more often.We conduct an ablation study on the effectiveness of different components (eg., text, video and audio) and present it in Table TABREF24 . Our experiments show that:Conclusions\\tWe presented modelname, a state-of-the-art dialogue model for conversations about videos. We evaluated the model on the official AVSD test set, where it achieves a relative improvement of more than 16% over the baseline model on BLEU-4 and more than 33% on CIDEr. The challenging aspect', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='Do they use pretrained word vectors for dialogue context embedding?', context=\"utterance-level encoders, thus maintaining the dialogue state and dialogue coherence. We use the final hidden states of the utterance-level encoders in the attention mechanism that is applied to the outputs of the description, video, and audio encoders. The attended features from these encoders are fused with the dialogue-level encoder's hidden states. An utterance-level decoder decodes the response for each such dialogue state following a question. We also add an auxiliary decoding module which is similar to the response decoder except that it tries to generate the caption and/or the summary of the video. We present our model in Figure FIGREF2 and describe the individual components in detail below.Utterance-level Encoder\\tThe utterance-level encoder is a recurrent neural network consisting of a single layer of lstm cells. The input to the lstm are word embeddings for each word in the utterance. The utterance is concatenated with a special symbol <eos> marking the end of the sequence. We initialize our word embeddings using 300-dimensional GloVe BIBREF30 and then fine-tune them during training. For words not present in the GloVe vocabulary, we initialize their word embeddings from a random uniform distribution.Description Encoder\\tSimilar to the utterance-level encoder, the description encoder is also a\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Do they use pretrained word vectors for dialogue context embedding?', context='From FiLM to Video: Multi-turn Question Answering with Multi-modal Context\\tUnderstanding audio-visual content and the ability to have an informative conversation about it have both been challenging areas for intelligent systems. The Audio Visual Scene-aware Dialog (AVSD) challenge, organized as a track of the Dialog System Technology Challenge 7 (DSTC7), proposes a combined task, where a system has to answer questions pertaining to a video given a dialogue with previous question-answer pairs and the video itself. We propose for this task a hierarchical encoder-decoder model which computes a multi-modal embedding of the dialogue context. It first embeds the dialogue history using two LSTMs. We extract video and audio frames at regular intervals and compute semantic features using pre-trained I3D and VGGish models, respectively. Before summarizing both modalities into fixed-length vectors using LSTMs, we use FiLM blocks to condition them on the embeddings of the current question, which allows us to reduce the dimensionality considerably. Finally, we use an LSTM decoder that we train with scheduled sampling and evaluate using beam search. Compared to the modality-fusing baseline model released by the AVSD challenge organizers, our model achieves a relative improvements of more than 16%, scoring', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Do they use pretrained word vectors for dialogue context embedding?', context='of multi-modal dialogue is fusing modalities with varying information density. On AVSD, it is easiest to learn from the input text, while video features remain largely opaque to the decoder. modelname uses a generalization of FiLM to video that conditions video feature extraction on a question. However, similar to related work, absolute improvements of incorporating video features into dialogue are consistent but small. Thus, while our results indicate the suitability of our FiLM generalization, they also highlight that applications at the intersection between language and video are currently constrained by the quality of video features, and emphasizes the need for larger datasets.', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='Do they use pretrained word vectors for dialogue context embedding?', context='utterance-level encoder then generates an attention map over the hidden states of its lstm, which is multiplied by the hidden states to provide the output of this module. We also experimented with using convolutional Mixed_5c features to capture spatial information but on the limited avsd dataset they did not yield any improvement. When not using the FiLM blocks, we use the final layer I3D features (provided by the avsd organizers) and encode them with the lstm directly, followed by the attention step. We present the video encoder in Figure FIGREF3 .Audio Encoder\\tThe audio encoder is structurally similar to the video encoder. We use the VGGish features provided by the avsd challenge organizers. Also similar to the video encoder, when not using the FiLM blocks, we use the VGGish features and encode them with the lstm directly, followed by the attention step. The audio encoder is depicted in Figure FIGREF4 .Fusing Modalities for Dialogue Context\\tThe outputs of the encoders for past utterances, descriptions, video, and audio together form the dialogue context INLINEFORM0 which is the input of the decoder. We first combine past utterances using a dialogue-level encoder which is a single-layer lstm recurrent neural network. The input to this encoder', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Do they use pretrained word vectors for dialogue context embedding?', context=\"of the dialogue history do better than models that do not use dialogue history at all. Unexpectedly, the performance between models that use the image features and models that do no use these features is not significantly different. As we discussed in Section SECREF1 , this is similar to the issues vqa models faced initially due to the imbalanced nature of the dataset, which leads us to believe that language is a strong prior on the visdial dataset too. BIBREF20 train two separate agents to play a cooperative game where one agent has to answer the other agent's questions, which in turn has to predict the fc7 features of the Image obtained from VGGNet. Both agents are based on hred models and they show that agents fine-tuned with rl outperform agents trained solely with supervised learning. BIBREF18 train both generative and discriminative deep neural models on the igc dataset, where the task is to generate questions and answers to carry on a meaningful conversation. BIBREF19 train hred-based models on GuessWhat?! dataset in which agents have to play a guessing game where one player has to find an object in the picture which the other player knows about and can answer questions about them.Moving from image-based dialogue to video-based dialogue\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Do they train a different training method except from scheduled sampling?', context='ask questions. The answerer sees the video and answers the questions asked by the questioner. After 10 such qa turns, the questioner wraps up by writing a summary of the video based on the conversation.Dataset statistics such as the number of dialogues, turns, and words for the avsd dataset are presented in Table TABREF5 . For the initially released prototype dataset, the training set of the avsd dataset corresponds to videos taken from the training set of the Charades dataset while the validation and test sets of the avsd dataset correspond to videos taken from the validation set of the Charades dataset. For the official dataset, training, validation and test sets are drawn from the corresponding Charades sets.The Charades dataset also provides additional annotations for the videos such as action, scene, and object annotations, which are considered to be external data sources by the avsd challenge, for which there is a special sub-task in the challenge. The action annotations also include the start and end time of the action in the video.Models\\tOur modelname model is based on the hred framework for modelling dialogue systems. In our model, an utterance-level recurrent lstm encoder encodes utterances and a dialogue-level recurrent lstm encoder encodes the final hidden states of the', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='Do they train a different training method except from scheduled sampling?', context=\"of the dialogue history do better than models that do not use dialogue history at all. Unexpectedly, the performance between models that use the image features and models that do no use these features is not significantly different. As we discussed in Section SECREF1 , this is similar to the issues vqa models faced initially due to the imbalanced nature of the dataset, which leads us to believe that language is a strong prior on the visdial dataset too. BIBREF20 train two separate agents to play a cooperative game where one agent has to answer the other agent's questions, which in turn has to predict the fc7 features of the Image obtained from VGGNet. Both agents are based on hred models and they show that agents fine-tuned with rl outperform agents trained solely with supervised learning. BIBREF18 train both generative and discriminative deep neural models on the igc dataset, where the task is to generate questions and answers to carry on a meaningful conversation. BIBREF19 train hred-based models on GuessWhat?! dataset in which agents have to play a guessing game where one player has to find an object in the picture which the other player knows about and can answer questions about them.Moving from image-based dialogue to video-based dialogue\", score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='Do they train a different training method except from scheduled sampling?', context='conversations do not need to happen in a deterministic ordering of turns. These per-turn evaluation metrics are mostly word-overlap-based metrics such as BLEU, METEOR, ROUGE, and CIDEr, borrowed from the machine translation literature. Due to the diverse nature of possible responses, world-overlap metrics are not highly suitable for evaluating these tasks. Human evaluation of generated responses is considered the most reliable metric for such tasks but it is cost prohibitive and hence the dialogue system literature continues to rely widely on word-overlap-based metrics.The avsd dataset and challenge\\tThe avsd dataset BIBREF28 consists of dialogues collected via amt. Each dialogue is associated with a video from the Charades BIBREF29 dataset and has conversations between two amt workers related to the video. The Charades dataset has multi-action short videos and it provides text descriptions for these videos, which the avsd challenge also distributes as the caption. The avsd dataset has been collected using similar methodology as the visdial dataset. In avsd, each dialogue turn consists of a question and answer pair. One of the amt workers assumes the role of questioner while the other amt worker assumes the role of answerer. The questioner sees three static frames from the video and has to', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='Do they train a different training method except from scheduled sampling?', context='architecture that uses the previous two utterances to generate the current response. Among discriminative methods (i.e. methods that produce a score for utterances from a set and then rank them), BIBREF15 proposed a neural architecture to select the best next response from a list of responses by measuring their similarity to the dialogue context. BIBREF16 extended prior work on encoder-decoder-based models to multi-turn conversations. They trained a hierarchical model called hred for generating dialogue utterances where a recurrent neural network encoder encodes each utterance. A higher-level recurrent neural network maintains the dialogue state by further encoding the individual utterance encodings. This dialogue state is then decoded by another recurrent decoder to generate the response at that point in time. In followup work, BIBREF17 used a latent stochastic variable to condition the generation process which aided their model in producing longer coherent outputs that better retain the context.Datasets and tasks BIBREF10 , BIBREF18 , BIBREF19 have also been released recently to study visual-input based conversations. BIBREF10 train several generative and discriminative deep neural models for the visdial task. They observe that on this task, discriminative models outperform generative models and that models making better use', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='Do they train a different training method except from scheduled sampling?', context='the decoder LSTM output, r*t,m-1 ={ll rt,m-1 ; s>0.2, sU(0, 1)v INLINEFORM0 ; else . is given by scheduled sampling BIBREF32 , and INLINEFORM1 is a symbol denoting the start of a sequence. We optimize the model using the AMSGrad algorithm BIBREF33 and use a per-condition random search to determine hyperparameters. We train the model using the BLEU-4 score on the validation set as our stopping citerion.Experiments\\tThe avsd challenge tasks we address here are:We train our modelname model for Task 1.a and Task 2.a of the challenge and we present the results in Table TABREF9 . Our model outperforms the baseline model released by BIBREF11 on all of these tasks. The scores for the winning team have been released to challenge participants and are also included. Their approach, however, is not public as of yet. We observe the following for our models:Since the official test set has not been released publicly, results reported on the official test set have been provided by the challenge organizers. For the prototype test set and for the ablation study presented in Table TABREF24 , we use the', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='By how much do they outperform standard BERT?', context='shows the results of our experiments. As prescribed by the shared task, the essential evaluation metric is the micro-averaged F1-score. All scores reported in this paper are obtained using models that are trained on the training set and evaluated on the validation set. For the final submission to the shared task competition, the best-scoring setup is used and trained on the training and validation sets combined.We are able to demonstrate that incorporating metadata features and author embeddings leads to better results for both sub-tasks. With an F1-score of 87.20 for task A and 64.70 for task B, the setup using BERT-German with metadata features and author embeddings (1) outperforms all other setups. Looking at the precision score only, BERT-German with metadata features (2) but without author embeddings performs best.In comparison to the baseline (7), our evaluation shows that deep transformer models like BERT considerably outperform the classical TF-IDF approach, also when the input is the same (using the title and blurb only). BERT-German (4) and BERT-Multilingual (5) are only using text-based features (title and blurb), whereby the text representations of the BERT-layers are directly fed into', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='By how much do they outperform standard BERT?', context=\"books exceed our cut-off point of 300 words per blurb. In addition, the distribution of labeled books is imbalanced, i.e. for many classes only a single digit number of training instances exist (Fig. FIGREF38). Thus, this task can be considered a low resource scenario, where including related data (such as author embeddings and author identity features such as gender and academic title) or making certain characteristics more explicit (title and blurb length statistics) helps. Furthermore, it should be noted that the blurbs do not provide summary-like abstracts of the book, but instead act as teasers, intended to persuade the reader to buy the book.As reflected by the recent popularity of deep transformer models, they considerably outperform the Logistic Regression baseline using TF-IDF representation of the blurbs. However, for the simpler sub-task A, the performance difference between the baseline model and the multilingual BERT model is only six points, while consuming only a fraction of BERT's computing resources. The BERT model trained for German (from scratch) outperforms the multilingual BERT model by under three points for sub-task A and over six points for sub-task B, confirming the findings reported by the creators of the BERT-German models for\", score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='By how much do they outperform standard BERT?', context='authors, the embedding of the first author for which an embedding is available is used. Following this method, we are able to retrieve embeddings for 72% of the books in the training and test set (see Table TABREF21).Experiments ::: Pre-trained German Language Model\\tAlthough the pre-trained BERT language models are multilingual and, therefore, support German, we rely on a BERT model that was exclusively pre-trained on German text, as published by the German company Deepset AI. This model was trained from scratch on the German Wikipedia, news articles and court decisions. Deepset AI reports better performance for the German BERT models compared to the multilingual models on previous German shared tasks (GermEval2018-Fine and GermEval 2014).Experiments ::: Model Architecture\\tOur neural network architecture, shown in Figure FIGREF31, resembles the original BERT model BIBREF1 and combines text- and non-text features with a multilayer perceptron (MLP).The BERT architecture uses 12 hidden layers, each layer consists of 768 units. To derive contextualized representations from textual features, the book title and blurb are concatenated and then fed through BERT. To minimize the GPU memory consumption, we limit the', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='By how much do they outperform standard BERT?', context='the classification layer.To establish the information gain of author embeddings, we train a linear classifier on author embeddings, using this as the only feature. The author-only model (6) is exclusively evaluated on books for which author embeddings are available, so the numbers are based on a slightly smaller validation set. With an F1-score of 61.99 and 32.13 for sub-tasks A and B, respectively, the author model yields the worst result. However, the information contained in the author embeddings help improve performance, as the results of the best-performing setup show. When evaluating the best model (1) only on books for that author embeddings are available, we find a further improvement with respect to F1 score (task A: from 87.20 to 87.81; task B: 64.70 to 65.74).Discussion\\tThe best performing setup uses BERT-German with metadata features and author embeddings. In this setup the most data is made available to the model, indicating that, perhaps not surprisingly, more data leads to better classification performance. We expect that having access to the actual text of the book will further increase performance. The average number of words per blurb is 95 and only 0.25% of', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='By how much do they outperform standard BERT?', context='Enriching BERT with Knowledge Graph Embeddings for Document Classification\\tIn this paper, we focus on the classification of books using short descriptive texts (cover blurbs) and additional metadata. Building upon BERT, a deep neural language model, we demonstrate how to combine text representations with metadata and knowledge graph embeddings, which encode author information. Compared to the standard BERT approach we achieve considerably better results for the classification task. For a more coarse-grained classification using eight labels we achieve an F1- score of 87.20, while a detailed classification using 343 labels yields an F1-score of 64.70. We make the source code and trained models of our experiments publicly available\\tIntroduction\\tWith ever-increasing amounts of data available, there is an increase in the need to offer tooling to speed up processing, and eventually making sense of this data. Because fully-automated tools to extract meaning from any given input to any desired level of detail have yet to be developed, this task is still at least supervised, and often (partially) resolved by humans; we refer to these humans as knowledge workers. Knowledge workers are professionals that have to go through large amounts of data and consolidate, prepare and process it on a daily basis. This data', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What dataset do they use?', context='representations. They test their approach on six large scale text classification problems and outperform previous methods substantially by increasing accuracy by about 3 to 4 percentage points. BIBREF11 (the organisers of the GermEval 2019 shared task on hierarchical text classification) use shallow capsule networks, reporting that these work well on structured data for example in the field of visual inference, and outperform CNNs, LSTMs and SVMs in this area. They use the Web of Science (WOS) dataset and introduce a new real-world scenario dataset called Blurb Genre Collection (BGC).With regard to external resources to enrich the classification task, BIBREF12 experiment with external knowledge graphs to enrich embedding information in order to ultimately improve language understanding. They use structural knowledge represented by Wikidata entities and their relation to each other. A mix of large-scale textual corpora and knowledge graphs is used to further train language representation exploiting ERNIE BIBREF13, considering lexical, syntactic, and structural information. BIBREF14 propose and evaluate an approach to improve text classification with knowledge from Wikipedia. Based on a bag of words approach, they derive a thesaurus of concepts from Wikipedia and use it for document expansion. The resulting document representation improves the performance', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What dataset do they use?', context='of our experiments and the trained models are publicly available.Future work comprises the use of hierarchical information in a post-processing step to refine the classification. Another promising approach to tackle the low resource problem for task B would be to use label embeddings. Many labels are similar and semantically related. The relationships between labels can be utilized to model in a joint embedding space BIBREF17. However, a severe challenge with regard to setting up label embeddings is the quite heterogeneous category system that can often be found in use online. The Random House taxonomy (see above) includes category names, i. e., labels, that relate to several different dimensions including, among others, genre, topic and function.This work is done in the context of a larger project that develops a platform for curation technologies. Under the umbrella of this project, the classification of pieces of incoming text content according to an ontology is an important step that allows the routing of this content to particular, specialized processing workflows, including parameterising the included pipelines. Depending on content type and genre, it may make sense to apply OCR post-processing (for digitized books from centuries ago), machine translation (for content in languages unknown to the user), information extraction, or other particular and specialized procedures. Constructing such a generic ontology for', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='What dataset do they use?', context='digital content is a challenging task, and classification performance is heavily dependent on input data (both in shape and amount) and on the nature of the ontology to be used (in the case of this paper, the one predefined by the shared task organisers). In the context of our project, we continue to work towards a maximally generic content ontology, and at the same time towards applied classification architectures such as the one presented in this paper.Acknowledgments\\tThis research is funded by the German Federal Ministry of Education and Research (BMBF) through the “Unternehmen Region”, instrument “Wachstumskern” QURATOR (grant no. 03WKDA1A). We would like to thank the anonymous reviewers for comments on an earlier version of this manuscript.', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='What dataset do they use?', context='shows the results of our experiments. As prescribed by the shared task, the essential evaluation metric is the micro-averaged F1-score. All scores reported in this paper are obtained using models that are trained on the training set and evaluated on the validation set. For the final submission to the shared task competition, the best-scoring setup is used and trained on the training and validation sets combined.We are able to demonstrate that incorporating metadata features and author embeddings leads to better results for both sub-tasks. With an F1-score of 87.20 for task A and 64.70 for task B, the setup using BERT-German with metadata features and author embeddings (1) outperforms all other setups. Looking at the precision score only, BERT-German with metadata features (2) but without author embeddings performs best.In comparison to the baseline (7), our evaluation shows that deep transformer models like BERT considerably outperform the classical TF-IDF approach, also when the input is the same (using the title and blurb only). BERT-German (4) and BERT-Multilingual (5) are only using text-based features (title and blurb), whereby the text representations of the BERT-layers are directly fed into', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='What dataset do they use?', context=\"input length to 300 tokens (which is shorter than BERT's hard-coded limit of 512 tokens). Only 0.25% of blurbs in the training set consist of more than 300 words, so this cut-off can be expected to have minor impact.The non-text features are generated in a separate preprocessing step. The metadata features are represented as a ten-dimensional vector (two dimensions for gender, see Section SECREF10). Author embedding vectors have a length of 200 (see Section SECREF22). In the next step, all three representations are concatenated and passed into a MLP with two layers, 1024 units each and ReLu activation function. During training, the MLP is supposed to learn a non-linear combination of its input representations. Finally, the output layer does the actual classification. In the SoftMax output layer each unit corresponds to a class label. For sub-task A the output dimension is eight. We treat sub-task B as a standard multi-label classification problem, i. e., we neglect any hierarchical information. Accordingly, the output layer for sub-task B has 343 units. When the value of an output unit is above a given threshold the corresponding label is predicted, whereby thresholds are defined separately for each class. The optimum was found\", score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How do they combine text representations with the knowledge graph embeddings?', context='Enriching BERT with Knowledge Graph Embeddings for Document Classification\\tIn this paper, we focus on the classification of books using short descriptive texts (cover blurbs) and additional metadata. Building upon BERT, a deep neural language model, we demonstrate how to combine text representations with metadata and knowledge graph embeddings, which encode author information. Compared to the standard BERT approach we achieve considerably better results for the classification task. For a more coarse-grained classification using eight labels we achieve an F1- score of 87.20, while a detailed classification using 343 labels yields an F1-score of 64.70. We make the source code and trained models of our experiments publicly available\\tIntroduction\\tWith ever-increasing amounts of data available, there is an increase in the need to offer tooling to speed up processing, and eventually making sense of this data. Because fully-automated tools to extract meaning from any given input to any desired level of detail have yet to be developed, this task is still at least supervised, and often (partially) resolved by humans; we refer to these humans as knowledge workers. Knowledge workers are professionals that have to go through large amounts of data and consolidate, prepare and process it on a daily basis. This data', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How do they combine text representations with the knowledge graph embeddings?', context='More precisely, we enrich BERT, as our pre-trained text representation model, with knowledge graph embeddings that are based on Wikidata BIBREF2, add metadata provided by the shared task organisers (title, author(s), publishing date, etc.) and collect additional information on authors for this particular document classification task. As we do not rely on text-based features alone but also utilize document metadata, we consider this as a document classification problem. The proposed approach is an attempt to solve this problem exemplary for single dataset provided by the organisers of the shared task.Related Work\\tA central challenge in work on genre classification is the definition of a both rigid (for theoretical purposes) and flexible (for practical purposes) mode of representation that is able to model various dimensions and characteristics of arbitrary text genres. The size of the challenge can be illustrated by the observation that there is no clear agreement among researchers regarding actual genre labels or their scope and consistency. There is a substantial amount of previous work on the definition of genre taxonomies, genre ontologies, or sets of labels BIBREF3, BIBREF4, BIBREF5, BIBREF6, BIBREF7. Since we work with the dataset provided by the organisers of the 2019 GermEval shared', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How do they combine text representations with the knowledge graph embeddings?', context='one should not judge a book by its cover, we argue that additional information on the author can support the classification task. Authors often adhere to their specific style of writing and are likely to specialize in a specific genre.To be precise, we want to include author identity information, which can be retrieved by selecting particular properties from, for example, the Wikidata knowledge graph (such as date of birth, nationality, or other biographical features). A drawback of this approach, however, is that one has to manually select and filter those properties that improve classification performance. This is why, instead, we follow a more generic approach and utilize automatically generated graph embeddings as author representations.Graph embedding methods create dense vector representations for each node such that distances between these vectors predict the occurrence of edges in the graph. The node distance can be interpreted as topical similarity between the corresponding authors.We rely on pre-trained embeddings based on PyTorch BigGraph BIBREF15. The graph model is trained on the full Wikidata graph, using a translation operator to represent relations. Figure FIGREF23 visualizes the locality of the author embeddings.To derive the author embeddings, we look up Wikipedia articles that match with the author names and map the articles to the corresponding Wikidata items. If a book has multiple', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How do they combine text representations with the knowledge graph embeddings?', context='can originate from highly diverse portals and resources and depending on type or category, the data needs to be channelled through specific down-stream processing pipelines. We aim to create a platform for curation technologies that can deal with such data from diverse sources and that provides natural language processing (NLP) pipelines tailored to particular content types and genres, rendering this initial classification an important sub-task.In this paper, we work with the dataset of the 2019 GermEval shared task on hierarchical text classification BIBREF0 and use the predefined set of labels to evaluate our approach to this classification task.Deep neural language models have recently evolved to a successful method for representing text. In particular, Bidirectional Encoder Representations from Transformers (BERT; BIBREF1) outperformed previous state-of-the-art methods by a large margin on various NLP tasks. We adopt BERT for text-based classification and extend the model with additional metadata provided in the context of the shared task, such as author, publisher, publishing date, etc.A key contribution of this paper is the inclusion of additional (meta) data using a state-of-the-art approach for text processing. Being a transfer learning approach, it facilitates the task solution with external knowledge for a setup in which relatively little training data is available.', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How do they combine text representations with the knowledge graph embeddings?', context='representations. They test their approach on six large scale text classification problems and outperform previous methods substantially by increasing accuracy by about 3 to 4 percentage points. BIBREF11 (the organisers of the GermEval 2019 shared task on hierarchical text classification) use shallow capsule networks, reporting that these work well on structured data for example in the field of visual inference, and outperform CNNs, LSTMs and SVMs in this area. They use the Web of Science (WOS) dataset and introduce a new real-world scenario dataset called Blurb Genre Collection (BGC).With regard to external resources to enrich the classification task, BIBREF12 experiment with external knowledge graphs to enrich embedding information in order to ultimately improve language understanding. They use structural knowledge represented by Wikidata entities and their relation to each other. A mix of large-scale textual corpora and knowledge graphs is used to further train language representation exploiting ERNIE BIBREF13, considering lexical, syntactic, and structural information. BIBREF14 propose and evaluate an approach to improve text classification with knowledge from Wikipedia. Based on a bag of words approach, they derive a thesaurus of concepts from Wikipedia and use it for document expansion. The resulting document representation improves the performance', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How accurate is their predictive model?', context='Interestingly, for almost all feature combinations the classifiers had slightly higher precision than recall for UL, while the contrary holds for FT and GN.News vs Fairy Tales. Finally, we wanted to check whether UL being “half-way\" between GN and FT can be observed in our classification experiments as well. If this hypothesis is correct, by classifying GN vs. FT we would expect to find higher performance than previous experiments. Results show that this is in fact the case. All features together performed better than all previous experiment and incredibly well (F1= 0.978), again improving over all the other subgroups of features alone ( $p<0.001$ ) apart from READ that performs equally well (F1=0.973, no statistically significant difference). Notably, all other groups of features improves over the UL vs. GN and the UL vs. FT tasks. Finally, all groups of features had a statistically significant improvement over the random baseline ( $p<0.001$ ).Three Class Classification. Finally we also tested feature predictivity on a three class classification task (UL vs GN vs FT). Since in this case we did not performed downsampling, we use the ZeroR classifier as a baseline. For the sake of interpretability of results,', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How accurate is their predictive model?', context='a machine learning framework, confirming the results emerged from the quantitative analysis part. In particular, as expected, these features gave the best results in the GN vs FT experiments, showing that these two genres represent the extremes of a continuum where ULs are placed.Ecological Experiments in the News Domain\\tAs a final validation of our feature importance we also set up experiments where we controlled for the medium where the message is delivered, specifically the online news domain. Since Newspapers exist for all kinds of stories and with all sorts of reputations for reliability we focused on two specific websites. One is the Weekly World News (WWN), a news website with very low reliability where many of the stories have the qualities of urban legends (the WWN was famous for stories about Bigfoot and UFOs, etc.). The other website is The New York Times (NYT), known for its high reliability and fact-checking procedures.We scraped the WWN for a total of 225 stories, and then randomly selected an equal amount of stories from the NYT. For both datasets we extracted the same set of features discussed in the previous sections. For every feature combination we conducted a binary classification experiment with ten-fold cross validation on the dataset. Since the dataset is balanced, this accounts for a', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How accurate is their predictive model?', context='along with precision, recall and F1 we also provide the Matthews Correlation Coefficient (MCC) which is useful for unbalanced datasets, as presented in BIBREF39 for the multiclass case. MCC returns a value between -1 and +1, where +1 represents a perfect prediction, 0 no better than random and -1 indicates total disagreement. Results are consistent with previous experiments. In Table 8 , all feature configurations show an improvement over the baseline ( $p<0.001$ ) but the temporal features (TIMEX) have far lower discriminative power as compared to others groups of features (MCC=0.339).Discussion\\tWhile between UL and GN the discrimination is given by a skillful mixture of all the prototypical features together, where none has a clear predominance over the others, between UL and FT, readability (READ) and affect (AFF) play a major role. From the summary in Table 8 we see that while ALL features together have the highest averaged F1, READ is the best performing subset of features in all experiments, followed by AFF, NER and TIMEX that perform reasonably well. n general, these experiments proved the goodness of our features in discriminating UL against FT and GN in', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How accurate is their predictive model?', context=\"the formulae, as reported in the second part of Table 5 , we see that while the percentage of complex words (either $\\\\frac{Cpx_A}{Words_A}$ or $\\\\frac{Syll_A}{Words_A}$ ) puts UL halfway between FT and GN, the average length of sentences ( $\\\\frac{Words_A}{Sent_A}$ ) is surprisingly higher for FT than GN and in turn UL. So, depending on the weight given either to complex words or to sentence length, the results in Table 5 can be interpreted.All differences in the means reported in the tables are statistically significant (Student's t-test, $p<0.001$ ) apart from TIME, between UL and FT, and DURATION, between UL and GN, (signalled with * in Table 3 ).Turning to the analysis of variance, we see that FT is – on average – a more cohesive genre, with lower standard deviations, while GN and UL have higher and closer standard deviations. In fact, all differences in the standard deviations reported in the tables are statistically significant (f-test, $p<0.001$ ) apart between UL and GN in Fog, Kincaid and in ALL sentiment (signalled with * in the respective Tables).Classification\", score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How accurate is their predictive model?', context='$p<0.001$ ). Particularly, the temporal features (TIMEX) performed worse than AFF and NE ( $p<0.001$ ). Still, all features improved over the baseline ( $p<0.001$ ).Urban Legends vs Fairy Tales. In the UL vs. FT classification task, all the features together performed better than the previous experiment (F1 = 0.897), again improving over all the other subgroups of features alone ( $p<0.001$ ). Interestingly, the best discriminative subgroup of features (still READ, F1 = 0.868) in this case reduces the lead with respect to all the features together (ALL) and improves over the others subgroups ( $p<0.001$ ) apart from the AFF group – from which has no significant difference – that in this case performs better than in the previous experiment. On the contrary, the TIMEX group had similar performances as the previous experiment, while NE improved its performance. Finally, all groups of features had a statistically significant improvement over the baseline ( $p<0.001$ ).In Table 7 we report the performances of the various classification tasks in term of precision, recall and F1 over the single classes.', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How does morphological analysis differ from morphological inflection?', context=\"latest SIGMORPHON 2019 morphological inflection shared task. Our models are implemented in DyNet BIBREF13.Inflection results are outlined in Table . In the `standard' setting we simply train on the pre-defined training set, achieving an exact-match accuracy of 60% over the test set. Interestingly, the data augmentation approach of BIBREF12 that hallucinates new training paradigms based on character level alignments does not heed significant improvements in accuracy (only 2 percentage points increase, cf. with more than 15 percentage points increases in other languages). These results indicate that automatic morphological inflection for low-resource tonal languages like SJQ Chatino poses a particularly challenging setting, which perhaps requires explicit handling of tone information by the model.Baseline Results ::: Morphological Analysis\\tMorphological analysis is the task of creating a morphosyntactic description for a given word. It can be framed in a context-agnostic manner (as in our case) or within a given context, as for instance for the SIGMORPHON 2019 second shared task BIBREF11. We trained an encoder-decoder model that receives the form as character-level input, encodes it with a BiLSTM encoder, and\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How does morphological analysis differ from morphological inflection?', context='A Resource for Studying Chatino Verbal Morphology\\tWe present the first resource focusing on the verbal inflectional morphology of San Juan Quiahije Chatino, a tonal mesoamerican language spoken in Mexico. We provide a collection of complete inflection tables of 198 lemmata, with morphological tags based on the UniMorph schema. We also provide baseline results on three core NLP tasks: morphological analysis, lemmatization, and morphological inflection.\\tIntroduction\\tThe recent years have seen unprecedented forward steps for Natural Language Processing (NLP) over almost every NLP subtask, relying on the advent of large data collections that can be leveraged to train deep neural networks. However, this progress has solely been observed in languages with significant data resources, while low-resource languages are left behind.The situation for endangered languages is usually even worse, as the focus of the scientific community mostly relies in language documentation. The typical endangered language documentation process typically includes the creation of language resources in the form of word lists, audio and video recordings, notes, or grammar fragments, with the created resources then stored into large online linguistics archives. This process is often hindered by the so-called Transcription Bottleneck, but recent advances', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How does morphological analysis differ from morphological inflection?', context='Chatino, but it also provides an exciting new data point in the computational study of morphological analysis, lemmatization, and inflection, as it is the first one in a tonal language with explicit tonal markings in the writing system. In a similar vein, the Oto-Manguean Inflectional Class Database BIBREF22 provides a valuable resource for studying the verbal morphology of Oto-Manguean languages (including a couple of other Chatino variants: Yaitepec and Zenzotepec Chatino) but not in a format suitable for computational experiments.Conclusion\\tWe presented a resource of 198 complete inflectional paradigms in San Juan Quiahije Chatino, which will facilitate research in computational morphological analysis and inflection for low-resource tonal languages and languages of Mesoamerica. We also provide strong baseline results on computational morphological analysis, lemmatization, and inflection realization, using character-level neural encoder-decoder systems.For future work, while we will keep expanding our resource to include more paradigms, we will also follow the community guidelines in extending our resource to include morphological analysis and inflection examples in context.Acknowledgements\\tPart of this work was done during the', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How does morphological analysis differ from morphological inflection?', context='corpus are outlined in Table 1 . Compared to all the other languages from the Unimorph project, this puts SJQ Chatino in the low- to mid-resource category, but nonetheless it is more than enough for benchmarking purposes.Baseline Results ::: Inflectional realization\\tInflectional realization defines the inflected forms of a lexeme/lemma. As a computational task, often referred to as simply “morphological inflection,\" inflectional realization is framed as a mapping from the pairing of a lemma with a set of morphological tags to the corresponding word form. For example, the inflectional realization of SJQ Chatino verb forms entails a mapping of the pairing of the lemma lyu1 `fall\\' with the tag-set 1;SG;PROG to the word form nlyon32.Morphological inflection has been thoroughly studied in monolingual high resource settings, especially through the recent SIGMORPHON challenges BIBREF8, BIBREF9, BIBREF10, with the latest iteration focusing more on low-resource settings, utilizing cross-lingual transfer BIBREF11. We use the guidelines of the state-of-the-art approach of BIBREF12 that achieved the highest inflection accuracy in the', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How does morphological analysis differ from morphological inflection?', context=\"BIBREF0, BIBREF1 provide promising directions towards a solution for this issue.However, language documentation and linguistic description, although extremely important itself, does not meaningfully contribute to language conservation, which aims to ensure that the language stays in use. We believe that a major avenue towards continual language use is by further creating language technologies for endangered languages, essentially elevating them to the same level as high-resource, economically or politically stronger languages.The majority of the world's languages are categorized as synthetic, meaning that they have rich morphology, be it fusional, agglutinative, polysynthetic, or a mixture thereof. As Natural Language Processing (NLP) keeps expanding its frontiers to encompass more and more languages, modeling of the grammatical functions that guide language generation is of utmost importance. It follows, then, that the next crucial step for expanding NLP research on endangered languages is creating benchmarks for standard NLP tasks in such languages.With this work we take a small first step towards this direction. We present a resource that allows for benchmarking two NLP tasks in San Juan Quiahije Chatino, an endangered language spoken in southern Mexico: morphological analysis and morphological inflection, with a focus on the verb morphology of the\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What was the criterion used for selecting the lemmata?', context='input, are outlined in Table . We find that automatic lemmatization in SJQ Chatino achieves fairly high accuracy even with our simple baseline models (89% accuracy, $0.27$ average Levenshtein distance) and that providing the gold morphological tags provides a performance boost indicated by small improvements on both metrics. It it worth noting, though, that these results are also well below the $94--95\\\\%$ average accuracy and $0.13$ average Levenshtein distance that lemmatization models achieved over 107 treebanks in 66 languages for the SIGMORPHON 2019 shared task BIBREF11.Related Work\\tOur work builds and expands upon previous work on Indigenous languages of the Americas specifically focusing on the complexity of their morphology. Among other works similar to ours, BIBREF17 focused on the morphology of Dene verbs, BIBREF18 on Arapaho verbs, BIBREF19 on Shipibo-Konibo, and BIBREF20 on Saint Lawrence Island and Central Siberian Yupik. BIBREF21 describe an approach for elicit complete inflection paradigms, with experiments in languages like Nahuatl. Our resource is the first one for SJQ', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What was the criterion used for selecting the lemmata?', context='then an attention enhanced decoder BIBREF14 outputs the corresponding sequence of morphological tags, implemented in DyNet. The baseline results are shown in Table . The exact-match accuracy of 67% is lower than the average accuracy that context-aware systems can achieve, and it highlights the challenge that the complexity of the tonal system of SJQ Chatino can pose.Baseline Results ::: Lemmatization\\tLemmatization is the task of retrieving the underlying lemma from which an inflected form was derived. Although in some languages the lemma is distinct from all forms, in SJQ Chatino the lemma is defined as the completive third-person singular form. As a computational task, lemmatization entails producing the lemma given an inflected form (and possibly, given a set of morphological tags describing the input form). Popular approaches tackle it as a character-level edit sequence generation task BIBREF15, or as a character-level sequence-to-sequence task BIBREF16. For our baseline lemmatization systems we follow the latter approach. We trained a character level encoder-decoder model, similar to the above-mentioned inflection system, implemented in DyNet.The baseline results, with and without providing gold morphological tags along with the inflected form as', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What was the criterion used for selecting the lemmata?', context='corpus are outlined in Table 1 . Compared to all the other languages from the Unimorph project, this puts SJQ Chatino in the low- to mid-resource category, but nonetheless it is more than enough for benchmarking purposes.Baseline Results ::: Inflectional realization\\tInflectional realization defines the inflected forms of a lexeme/lemma. As a computational task, often referred to as simply “morphological inflection,\" inflectional realization is framed as a mapping from the pairing of a lemma with a set of morphological tags to the corresponding word form. For example, the inflectional realization of SJQ Chatino verb forms entails a mapping of the pairing of the lemma lyu1 `fall\\' with the tag-set 1;SG;PROG to the word form nlyon32.Morphological inflection has been thoroughly studied in monolingual high resource settings, especially through the recent SIGMORPHON challenges BIBREF8, BIBREF9, BIBREF10, with the latest iteration focusing more on low-resource settings, utilizing cross-lingual transfer BIBREF11. We use the guidelines of the state-of-the-art approach of BIBREF12 that achieved the highest inflection accuracy in the', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='What was the criterion used for selecting the lemmata?', context='A Resource for Studying Chatino Verbal Morphology\\tWe present the first resource focusing on the verbal inflectional morphology of San Juan Quiahije Chatino, a tonal mesoamerican language spoken in Mexico. We provide a collection of complete inflection tables of 198 lemmata, with morphological tags based on the UniMorph schema. We also provide baseline results on three core NLP tasks: morphological analysis, lemmatization, and morphological inflection.\\tIntroduction\\tThe recent years have seen unprecedented forward steps for Natural Language Processing (NLP) over almost every NLP subtask, relying on the advent of large data collections that can be leveraged to train deep neural networks. However, this progress has solely been observed in languages with significant data resources, while low-resource languages are left behind.The situation for endangered languages is usually even worse, as the focus of the scientific community mostly relies in language documentation. The typical endangered language documentation process typically includes the creation of language resources in the form of word lists, audio and video recordings, notes, or grammar fragments, with the created resources then stored into large online linguistics archives. This process is often hindered by the so-called Transcription Bottleneck, but recent advances', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What was the criterion used for selecting the lemmata?', context='Chatino, but it also provides an exciting new data point in the computational study of morphological analysis, lemmatization, and inflection, as it is the first one in a tonal language with explicit tonal markings in the writing system. In a similar vein, the Oto-Manguean Inflectional Class Database BIBREF22 provides a valuable resource for studying the verbal morphology of Oto-Manguean languages (including a couple of other Chatino variants: Yaitepec and Zenzotepec Chatino) but not in a format suitable for computational experiments.Conclusion\\tWe presented a resource of 198 complete inflectional paradigms in San Juan Quiahije Chatino, which will facilitate research in computational morphological analysis and inflection for low-resource tonal languages and languages of Mesoamerica. We also provide strong baseline results on computational morphological analysis, lemmatization, and inflection realization, using character-level neural encoder-decoder systems.For future work, while we will keep expanding our resource to include more paradigms, we will also follow the community guidelines in extending our resource to include morphological analysis and inflection examples in context.Acknowledgements\\tPart of this work was done during the', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='What are the architectures used for the three tasks?', context='Workshop on Language Technology for Language Documentation and Revitalization. This material is based upon work generously supported by the National Science Foundation under grant 1761548.', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='What are the architectures used for the three tasks?', context=\"pattern is based on a series of three person/number (PN) triplets. A PN triplet [X, Y, Z] consists of three tones: tone X is employed in the third person singular as well as in all plural forms; tone Y is employed in the second person singular, and tone Z, in the third person singular. Thus, a verb's membership in a particular conjugation class entails the assignment of one tone triplet to completive forms, another to progressive forms, and a third to habitual and potential forms. The paradigm of the verb lyu1 `fall' in Table illustrates: the conjugation class to which this verb belongs entails the assignment of the triplet [1, 42, 20] to the completive, [1, 42, 32] to the progressive, and [20, 42, 32] to the habitual and potential. Verbs in other conjugation classes exhibit other triplet series.The Resource\\tWe provide a hand-curated collection of complete inflection tables for 198 lemmata. The morphological tags follow the guidelines of the UniMorph schema BIBREF6, BIBREF7, in order to allow for the potential of cross-lingual transfer learning, and they are\", score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='What are the architectures used for the three tasks?', context='A Resource for Studying Chatino Verbal Morphology\\tWe present the first resource focusing on the verbal inflectional morphology of San Juan Quiahije Chatino, a tonal mesoamerican language spoken in Mexico. We provide a collection of complete inflection tables of 198 lemmata, with morphological tags based on the UniMorph schema. We also provide baseline results on three core NLP tasks: morphological analysis, lemmatization, and morphological inflection.\\tIntroduction\\tThe recent years have seen unprecedented forward steps for Natural Language Processing (NLP) over almost every NLP subtask, relying on the advent of large data collections that can be leveraged to train deep neural networks. However, this progress has solely been observed in languages with significant data resources, while low-resource languages are left behind.The situation for endangered languages is usually even worse, as the focus of the scientific community mostly relies in language documentation. The typical endangered language documentation process typically includes the creation of language resources in the form of word lists, audio and video recordings, notes, or grammar fragments, with the created resources then stored into large online linguistics archives. This process is often hindered by the so-called Transcription Bottleneck, but recent advances', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='What are the architectures used for the three tasks?', context='then an attention enhanced decoder BIBREF14 outputs the corresponding sequence of morphological tags, implemented in DyNet. The baseline results are shown in Table . The exact-match accuracy of 67% is lower than the average accuracy that context-aware systems can achieve, and it highlights the challenge that the complexity of the tonal system of SJQ Chatino can pose.Baseline Results ::: Lemmatization\\tLemmatization is the task of retrieving the underlying lemma from which an inflected form was derived. Although in some languages the lemma is distinct from all forms, in SJQ Chatino the lemma is defined as the completive third-person singular form. As a computational task, lemmatization entails producing the lemma given an inflected form (and possibly, given a set of morphological tags describing the input form). Popular approaches tackle it as a character-level edit sequence generation task BIBREF15, or as a character-level sequence-to-sequence task BIBREF16. For our baseline lemmatization systems we follow the latter approach. We trained a character level encoder-decoder model, similar to the above-mentioned inflection system, implemented in DyNet.The baseline results, with and without providing gold morphological tags along with the inflected form as', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='What are the architectures used for the three tasks?', context=\"BIBREF0, BIBREF1 provide promising directions towards a solution for this issue.However, language documentation and linguistic description, although extremely important itself, does not meaningfully contribute to language conservation, which aims to ensure that the language stays in use. We believe that a major avenue towards continual language use is by further creating language technologies for endangered languages, essentially elevating them to the same level as high-resource, economically or politically stronger languages.The majority of the world's languages are categorized as synthetic, meaning that they have rich morphology, be it fusional, agglutinative, polysynthetic, or a mixture thereof. As Natural Language Processing (NLP) keeps expanding its frontiers to encompass more and more languages, modeling of the grammatical functions that guide language generation is of utmost importance. It follows, then, that the next crucial step for expanding NLP research on endangered languages is creating benchmarks for standard NLP tasks in such languages.With this work we take a small first step towards this direction. We present a resource that allows for benchmarking two NLP tasks in San Juan Quiahije Chatino, an endangered language spoken in southern Mexico: morphological analysis and morphological inflection, with a focus on the verb morphology of the\", score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which language family does Chatino belong to?', context='language.We first briefly discuss the Chatino language and the intricacies of its verb morphology (§SECREF2), then describe the resource (§SECREF3), and finally present baseline results on both the morphological analysis and the inflection tasks using state-of-the-art neural models (§SECREF4). We make our resource publicly available online.The Chatino Language\\tChatino is a group of languages spoken in Oaxaca, Mexico. Together with the Zapotec language group, the Chatino languages form the Zapotecan branch of the Otomanguean language family. There are three main Chatino languages: Zenzontepec Chatino (ZEN, ISO 639-2 code czn), Tataltepec Chatino (TAT, cta), and Eastern Chatino (ISO 639-2 ctp, cya, ctz, and cly) (E.Cruz 2011 and Campbell 2011). San Juan Quiahije Chatino (SJQ), the language of the focus of this study, belongs to Eastern Chatino, and is used by about 3000 speakers.The Chatino Language ::: Typology and Writing System\\tEastern Chatino languages , including SJQ Chatino, are intensively', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which language family does Chatino belong to?', context='Chatino, but it also provides an exciting new data point in the computational study of morphological analysis, lemmatization, and inflection, as it is the first one in a tonal language with explicit tonal markings in the writing system. In a similar vein, the Oto-Manguean Inflectional Class Database BIBREF22 provides a valuable resource for studying the verbal morphology of Oto-Manguean languages (including a couple of other Chatino variants: Yaitepec and Zenzotepec Chatino) but not in a format suitable for computational experiments.Conclusion\\tWe presented a resource of 198 complete inflectional paradigms in San Juan Quiahije Chatino, which will facilitate research in computational morphological analysis and inflection for low-resource tonal languages and languages of Mesoamerica. We also provide strong baseline results on computational morphological analysis, lemmatization, and inflection realization, using character-level neural encoder-decoder systems.For future work, while we will keep expanding our resource to include more paradigms, we will also follow the community guidelines in extending our resource to include morphological analysis and inflection examples in context.Acknowledgements\\tPart of this work was done during the', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which language family does Chatino belong to?', context='A Resource for Studying Chatino Verbal Morphology\\tWe present the first resource focusing on the verbal inflectional morphology of San Juan Quiahije Chatino, a tonal mesoamerican language spoken in Mexico. We provide a collection of complete inflection tables of 198 lemmata, with morphological tags based on the UniMorph schema. We also provide baseline results on three core NLP tasks: morphological analysis, lemmatization, and morphological inflection.\\tIntroduction\\tThe recent years have seen unprecedented forward steps for Natural Language Processing (NLP) over almost every NLP subtask, relying on the advent of large data collections that can be leveraged to train deep neural networks. However, this progress has solely been observed in languages with significant data resources, while low-resource languages are left behind.The situation for endangered languages is usually even worse, as the focus of the scientific community mostly relies in language documentation. The typical endangered language documentation process typically includes the creation of language resources in the form of word lists, audio and video recordings, notes, or grammar fragments, with the created resources then stored into large online linguistics archives. This process is often hindered by the so-called Transcription Bottleneck, but recent advances', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which language family does Chatino belong to?', context='input, are outlined in Table . We find that automatic lemmatization in SJQ Chatino achieves fairly high accuracy even with our simple baseline models (89% accuracy, $0.27$ average Levenshtein distance) and that providing the gold morphological tags provides a performance boost indicated by small improvements on both metrics. It it worth noting, though, that these results are also well below the $94--95\\\\%$ average accuracy and $0.13$ average Levenshtein distance that lemmatization models achieved over 107 treebanks in 66 languages for the SIGMORPHON 2019 shared task BIBREF11.Related Work\\tOur work builds and expands upon previous work on Indigenous languages of the Americas specifically focusing on the complexity of their morphology. Among other works similar to ours, BIBREF17 focused on the morphology of Dene verbs, BIBREF18 on Arapaho verbs, BIBREF19 on Shipibo-Konibo, and BIBREF20 on Saint Lawrence Island and Central Siberian Yupik. BIBREF21 describe an approach for elicit complete inflection paradigms, with experiments in languages like Nahuatl. Our resource is the first one for SJQ', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which language family does Chatino belong to?', context='corpus are outlined in Table 1 . Compared to all the other languages from the Unimorph project, this puts SJQ Chatino in the low- to mid-resource category, but nonetheless it is more than enough for benchmarking purposes.Baseline Results ::: Inflectional realization\\tInflectional realization defines the inflected forms of a lexeme/lemma. As a computational task, often referred to as simply “morphological inflection,\" inflectional realization is framed as a mapping from the pairing of a lemma with a set of morphological tags to the corresponding word form. For example, the inflectional realization of SJQ Chatino verb forms entails a mapping of the pairing of the lemma lyu1 `fall\\' with the tag-set 1;SG;PROG to the word form nlyon32.Morphological inflection has been thoroughly studied in monolingual high resource settings, especially through the recent SIGMORPHON challenges BIBREF8, BIBREF9, BIBREF10, with the latest iteration focusing more on low-resource settings, utilizing cross-lingual transfer BIBREF11. We use the guidelines of the state-of-the-art approach of BIBREF12 that achieved the highest inflection accuracy in the', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What system is used as baseline?', context='A Resource for Studying Chatino Verbal Morphology\\tWe present the first resource focusing on the verbal inflectional morphology of San Juan Quiahije Chatino, a tonal mesoamerican language spoken in Mexico. We provide a collection of complete inflection tables of 198 lemmata, with morphological tags based on the UniMorph schema. We also provide baseline results on three core NLP tasks: morphological analysis, lemmatization, and morphological inflection.\\tIntroduction\\tThe recent years have seen unprecedented forward steps for Natural Language Processing (NLP) over almost every NLP subtask, relying on the advent of large data collections that can be leveraged to train deep neural networks. However, this progress has solely been observed in languages with significant data resources, while low-resource languages are left behind.The situation for endangered languages is usually even worse, as the focus of the scientific community mostly relies in language documentation. The typical endangered language documentation process typically includes the creation of language resources in the form of word lists, audio and video recordings, notes, or grammar fragments, with the created resources then stored into large online linguistics archives. This process is often hindered by the so-called Transcription Bottleneck, but recent advances', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='What system is used as baseline?', context='Workshop on Language Technology for Language Documentation and Revitalization. This material is based upon work generously supported by the National Science Foundation under grant 1761548.', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='What system is used as baseline?', context='corpus are outlined in Table 1 . Compared to all the other languages from the Unimorph project, this puts SJQ Chatino in the low- to mid-resource category, but nonetheless it is more than enough for benchmarking purposes.Baseline Results ::: Inflectional realization\\tInflectional realization defines the inflected forms of a lexeme/lemma. As a computational task, often referred to as simply “morphological inflection,\" inflectional realization is framed as a mapping from the pairing of a lemma with a set of morphological tags to the corresponding word form. For example, the inflectional realization of SJQ Chatino verb forms entails a mapping of the pairing of the lemma lyu1 `fall\\' with the tag-set 1;SG;PROG to the word form nlyon32.Morphological inflection has been thoroughly studied in monolingual high resource settings, especially through the recent SIGMORPHON challenges BIBREF8, BIBREF9, BIBREF10, with the latest iteration focusing more on low-resource settings, utilizing cross-lingual transfer BIBREF11. We use the guidelines of the state-of-the-art approach of BIBREF12 that achieved the highest inflection accuracy in the', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What system is used as baseline?', context=\"pattern is based on a series of three person/number (PN) triplets. A PN triplet [X, Y, Z] consists of three tones: tone X is employed in the third person singular as well as in all plural forms; tone Y is employed in the second person singular, and tone Z, in the third person singular. Thus, a verb's membership in a particular conjugation class entails the assignment of one tone triplet to completive forms, another to progressive forms, and a third to habitual and potential forms. The paradigm of the verb lyu1 `fall' in Table illustrates: the conjugation class to which this verb belongs entails the assignment of the triplet [1, 42, 20] to the completive, [1, 42, 32] to the progressive, and [20, 42, 32] to the habitual and potential. Verbs in other conjugation classes exhibit other triplet series.The Resource\\tWe provide a hand-curated collection of complete inflection tables for 198 lemmata. The morphological tags follow the guidelines of the UniMorph schema BIBREF6, BIBREF7, in order to allow for the potential of cross-lingual transfer learning, and they are\", score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='What system is used as baseline?', context=\"tagged with respect to:Person: first (1), second (2), and third (3)Number: singular (SG) ad plural (PL)Inclusivity (only applicable to first person plural verbs: inclusive (INCL) and exclusive (EXCL)Aspect/mood: completive (CPL), progressive (PROG), potential (POT), and habitual (HAB).Two examples of complete inflection tables for the verbs ndyu2 `fell from above' and lyu1 `fall' are shown in Table . Note how the first verb has the same PN triplet for all four aspect/mood categories, while the second paradigm is more representative in that it involves three different triplets (one for the completive, another for the progressive, and another for the habitual/potential). This variety is at the core of why the SJQ verb morphology is particularly interesting, and a challenging testcase for modern NLP systems.In total, we end up with 4716 groupings (triplets) of a lemma, a tag-set, and a form; we split these groupings randomly into a training set (3774 groupings), a development set (471 groupings), and test set (471 groupings). Basic statistics of the\", score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How was annotation done?', context='Workshop on Language Technology for Language Documentation and Revitalization. This material is based upon work generously supported by the National Science Foundation under grant 1761548.', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How was annotation done?', context='A Resource for Studying Chatino Verbal Morphology\\tWe present the first resource focusing on the verbal inflectional morphology of San Juan Quiahije Chatino, a tonal mesoamerican language spoken in Mexico. We provide a collection of complete inflection tables of 198 lemmata, with morphological tags based on the UniMorph schema. We also provide baseline results on three core NLP tasks: morphological analysis, lemmatization, and morphological inflection.\\tIntroduction\\tThe recent years have seen unprecedented forward steps for Natural Language Processing (NLP) over almost every NLP subtask, relying on the advent of large data collections that can be leveraged to train deep neural networks. However, this progress has solely been observed in languages with significant data resources, while low-resource languages are left behind.The situation for endangered languages is usually even worse, as the focus of the scientific community mostly relies in language documentation. The typical endangered language documentation process typically includes the creation of language resources in the form of word lists, audio and video recordings, notes, or grammar fragments, with the created resources then stored into large online linguistics archives. This process is often hindered by the so-called Transcription Bottleneck, but recent advances', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How was annotation done?', context='then an attention enhanced decoder BIBREF14 outputs the corresponding sequence of morphological tags, implemented in DyNet. The baseline results are shown in Table . The exact-match accuracy of 67% is lower than the average accuracy that context-aware systems can achieve, and it highlights the challenge that the complexity of the tonal system of SJQ Chatino can pose.Baseline Results ::: Lemmatization\\tLemmatization is the task of retrieving the underlying lemma from which an inflected form was derived. Although in some languages the lemma is distinct from all forms, in SJQ Chatino the lemma is defined as the completive third-person singular form. As a computational task, lemmatization entails producing the lemma given an inflected form (and possibly, given a set of morphological tags describing the input form). Popular approaches tackle it as a character-level edit sequence generation task BIBREF15, or as a character-level sequence-to-sequence task BIBREF16. For our baseline lemmatization systems we follow the latter approach. We trained a character level encoder-decoder model, similar to the above-mentioned inflection system, implemented in DyNet.The baseline results, with and without providing gold morphological tags along with the inflected form as', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How was annotation done?', context='input, are outlined in Table . We find that automatic lemmatization in SJQ Chatino achieves fairly high accuracy even with our simple baseline models (89% accuracy, $0.27$ average Levenshtein distance) and that providing the gold morphological tags provides a performance boost indicated by small improvements on both metrics. It it worth noting, though, that these results are also well below the $94--95\\\\%$ average accuracy and $0.13$ average Levenshtein distance that lemmatization models achieved over 107 treebanks in 66 languages for the SIGMORPHON 2019 shared task BIBREF11.Related Work\\tOur work builds and expands upon previous work on Indigenous languages of the Americas specifically focusing on the complexity of their morphology. Among other works similar to ours, BIBREF17 focused on the morphology of Dene verbs, BIBREF18 on Arapaho verbs, BIBREF19 on Shipibo-Konibo, and BIBREF20 on Saint Lawrence Island and Central Siberian Yupik. BIBREF21 describe an approach for elicit complete inflection paradigms, with experiments in languages like Nahuatl. Our resource is the first one for SJQ', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How was annotation done?', context=\"latest SIGMORPHON 2019 morphological inflection shared task. Our models are implemented in DyNet BIBREF13.Inflection results are outlined in Table . In the `standard' setting we simply train on the pre-defined training set, achieving an exact-match accuracy of 60% over the test set. Interestingly, the data augmentation approach of BIBREF12 that hallucinates new training paradigms based on character level alignments does not heed significant improvements in accuracy (only 2 percentage points increase, cf. with more than 15 percentage points increases in other languages). These results indicate that automatic morphological inflection for low-resource tonal languages like SJQ Chatino poses a particularly challenging setting, which perhaps requires explicit handling of tone information by the model.Baseline Results ::: Morphological Analysis\\tMorphological analysis is the task of creating a morphosyntactic description for a given word. It can be framed in a context-agnostic manner (as in our case) or within a given context, as for instance for the SIGMORPHON 2019 second shared task BIBREF11. We trained an encoder-decoder model that receives the form as character-level input, encodes it with a BiLSTM encoder, and\", score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How was the data collected?', context='Workshop on Language Technology for Language Documentation and Revitalization. This material is based upon work generously supported by the National Science Foundation under grant 1761548.', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How was the data collected?', context='A Resource for Studying Chatino Verbal Morphology\\tWe present the first resource focusing on the verbal inflectional morphology of San Juan Quiahije Chatino, a tonal mesoamerican language spoken in Mexico. We provide a collection of complete inflection tables of 198 lemmata, with morphological tags based on the UniMorph schema. We also provide baseline results on three core NLP tasks: morphological analysis, lemmatization, and morphological inflection.\\tIntroduction\\tThe recent years have seen unprecedented forward steps for Natural Language Processing (NLP) over almost every NLP subtask, relying on the advent of large data collections that can be leveraged to train deep neural networks. However, this progress has solely been observed in languages with significant data resources, while low-resource languages are left behind.The situation for endangered languages is usually even worse, as the focus of the scientific community mostly relies in language documentation. The typical endangered language documentation process typically includes the creation of language resources in the form of word lists, audio and video recordings, notes, or grammar fragments, with the created resources then stored into large online linguistics archives. This process is often hindered by the so-called Transcription Bottleneck, but recent advances', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How was the data collected?', context=\"BIBREF0, BIBREF1 provide promising directions towards a solution for this issue.However, language documentation and linguistic description, although extremely important itself, does not meaningfully contribute to language conservation, which aims to ensure that the language stays in use. We believe that a major avenue towards continual language use is by further creating language technologies for endangered languages, essentially elevating them to the same level as high-resource, economically or politically stronger languages.The majority of the world's languages are categorized as synthetic, meaning that they have rich morphology, be it fusional, agglutinative, polysynthetic, or a mixture thereof. As Natural Language Processing (NLP) keeps expanding its frontiers to encompass more and more languages, modeling of the grammatical functions that guide language generation is of utmost importance. It follows, then, that the next crucial step for expanding NLP research on endangered languages is creating benchmarks for standard NLP tasks in such languages.With this work we take a small first step towards this direction. We present a resource that allows for benchmarking two NLP tasks in San Juan Quiahije Chatino, an endangered language spoken in southern Mexico: morphological analysis and morphological inflection, with a focus on the verb morphology of the\", score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How was the data collected?', context='input, are outlined in Table . We find that automatic lemmatization in SJQ Chatino achieves fairly high accuracy even with our simple baseline models (89% accuracy, $0.27$ average Levenshtein distance) and that providing the gold morphological tags provides a performance boost indicated by small improvements on both metrics. It it worth noting, though, that these results are also well below the $94--95\\\\%$ average accuracy and $0.13$ average Levenshtein distance that lemmatization models achieved over 107 treebanks in 66 languages for the SIGMORPHON 2019 shared task BIBREF11.Related Work\\tOur work builds and expands upon previous work on Indigenous languages of the Americas specifically focusing on the complexity of their morphology. Among other works similar to ours, BIBREF17 focused on the morphology of Dene verbs, BIBREF18 on Arapaho verbs, BIBREF19 on Shipibo-Konibo, and BIBREF20 on Saint Lawrence Island and Central Siberian Yupik. BIBREF21 describe an approach for elicit complete inflection paradigms, with experiments in languages like Nahuatl. Our resource is the first one for SJQ', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How was the data collected?', context='Chatino, but it also provides an exciting new data point in the computational study of morphological analysis, lemmatization, and inflection, as it is the first one in a tonal language with explicit tonal markings in the writing system. In a similar vein, the Oto-Manguean Inflectional Class Database BIBREF22 provides a valuable resource for studying the verbal morphology of Oto-Manguean languages (including a couple of other Chatino variants: Yaitepec and Zenzotepec Chatino) but not in a format suitable for computational experiments.Conclusion\\tWe presented a resource of 198 complete inflectional paradigms in San Juan Quiahije Chatino, which will facilitate research in computational morphological analysis and inflection for low-resource tonal languages and languages of Mesoamerica. We also provide strong baseline results on computational morphological analysis, lemmatization, and inflection realization, using character-level neural encoder-decoder systems.For future work, while we will keep expanding our resource to include more paradigms, we will also follow the community guidelines in extending our resource to include morphological analysis and inflection examples in context.Acknowledgements\\tPart of this work was done during the', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='What factors contribute to interpretive biases according to this research?', context='Bias in Semantic and Discourse Interpretation\\tIn this paper, we show how game-theoretic work on conversation combined with a theory of discourse structure provides a framework for studying interpretive bias. Interpretive bias is an essential feature of learning and understanding but also something that can be used to pervert or subvert the truth. The framework we develop here provides tools for understanding and analyzing the range of interpretive biases and the factors that contribute to them.\\tIntroduction\\tBias is generally considered to be a negative term: a biased story is seen as one that perverts or subverts the truth by offering a partial or incomplete perspective on the facts. But bias is in fact essential to understanding: one cannot interpret a set of facts—something humans are disposed to try to do even in the presence of data that is nothing but noise [38]—without relying on a bias or hypothesis to guide that interpretation. Suppose someone presents you with the sequence INLINEFORM0 and tells you to guess the next number. To make an educated guess, you must understand this sequence as instantiating a particular pattern; otherwise, every possible continuation of the sequence will be equally probable for you. Formulating a hypothesis about what pattern is at work will allow you to predict how the sequence will play', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What factors contribute to interpretive biases according to this research?', context='however, it is an open question whether there is an objective norm or not, whether it is attainable and, if so, under what conditions, and whether an agent builds a history for attaining that norm or for some other purpose.Organization of the paper\\tOur paper is organized as follows. Section SECREF2 introduces our model of interpretive bias. Section SECREF3 looks forward towards some consequences of our model for learning and interpretation. We then draw some conclusions in Section SECREF4 . A detailed and formal analysis of interpretive bias has important social implications. Questions of bias are not only timely but also pressing for democracies that are having a difficult time dealing with campaigns of disinformation and a society whose information sources are increasingly fragmented and whose biases are often concealed. Understanding linguistic and cognitive mechanisms for bias precisely and algorithmically can yield valuable tools for navigating in an informationally bewildering world.The model of interpretive bias\\tAs mentioned in Section SECREF1 , understanding interpretive bias requires two ingredients. First, we need to know what it is to interpret a text or to build a history over a set of facts. Our answer comes from analyzing discourse structure and interpretation in SDRT BIBREF2 , BIBREF3 . A history for a text connects its', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What factors contribute to interpretive biases according to this research?', context=\"the selection of a history for a set of data, where the data might be a set of nonlinguistic entities or a set of linguistically expressed contents. In particular, we'll look at what people call “unbiased” histories. For us these also involve a bias, what we call a “truth seeking bias”. This is a bias that gets at the truth or acceptably close to it. Our model can show us what such a bias looks like. And we will examine the question of whether it is possible to find such a truth oriented bias for a set of facts, and if so, under what conditions. Can we detect and avoid biases that don't get at the truth but are devised for some other purpose?Our study of interpretive bias relies on three key premises. The first premise is that histories are discursive interpretations of a set of data in the sense that like discourse interpretations, they link together a set of entities with semantically meaningful relations. As such they are amenable to an analysis using the tools used to model a discourse's content and structure. The second is that a bias consists of a purpose or goal that the histories it generates are built to achieve and that agents build histories for many different purposes—to discover the truth or to\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What factors contribute to interpretive biases according to this research?', context=\"Since the seminal work of Kahneman and Tversky and the economist Allais, psychologists and empirical economists have provided valuable insights into cognitive biases in simple decision problems and simple mathematical tasks BIBREF14 . Some of this work, for example the bias of framing effects BIBREF7 , is directly relevant to our theory of interpretive bias. A situation is presented using certain lexical choices that lead to different “frames”: INLINEFORM0 of the people will live if you do INLINEFORM1 (frame 1) versus INLINEFORM2 of the people will die if you do INLINEFORM3 (frame 2). In fact, INLINEFORM4 , the total population in question; so the two consequents of the conditionals are equivalent. Each frame elaborates or “colors” INLINEFORM5 in a way that affects an interpreter's evaluation of INLINEFORM6 . These frames are in effect short histories whose discourse structure explains their coloring effect. Psychologists, empirical economists and statisticians have also investigated cases of cognitive bias in which subjects deviate from prescriptively rational or independently given objective outcomes in quantitative decision making and frequency estimation, even though they arguably have the goal of seeking an optimal or “true” solution. In a general analysis of interpretive bias like ours,\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What factors contribute to interpretive biases according to this research?', context='to interpret eventualities described by the two clauses as figuring in one of two histories: one in which the eventuality described by the first clause caused the second, or one in which the second caused the first. Any time that structural connections are left implicit by speakers—and this is much if not most of the time in text— interpreters will be left to infer these connections and thereby potentially create their own history or version of events.Every model of data, every history over that data, comes with a bias that allows us to use observed facts to make predictions; bias even determines what kind of predictions the model is meant to make. Bayesian inference, which underlies many powerful models of inference and machine learning, likewise relies on bias in several ways: the estimate of a state given evidence depends upon a prior probability distribution over states, on assumptions about what parameters are probabilistically independent, and on assumptions about the kind of conditional probability distribution that each parameter abides by (e.g., normal distribution, noisy-or, bimodal). Each of these generates a (potentially different) history.Objective of the paper\\tIn this paper, we propose a program for research on bias. We will show how to model various types of bias as well as the way in which bias leads to', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which interpretative biases are analyzed in this paper?', context='Bias in Semantic and Discourse Interpretation\\tIn this paper, we show how game-theoretic work on conversation combined with a theory of discourse structure provides a framework for studying interpretive bias. Interpretive bias is an essential feature of learning and understanding but also something that can be used to pervert or subvert the truth. The framework we develop here provides tools for understanding and analyzing the range of interpretive biases and the factors that contribute to them.\\tIntroduction\\tBias is generally considered to be a negative term: a biased story is seen as one that perverts or subverts the truth by offering a partial or incomplete perspective on the facts. But bias is in fact essential to understanding: one cannot interpret a set of facts—something humans are disposed to try to do even in the presence of data that is nothing but noise [38]—without relying on a bias or hypothesis to guide that interpretation. Suppose someone presents you with the sequence INLINEFORM0 and tells you to guess the next number. To make an educated guess, you must understand this sequence as instantiating a particular pattern; otherwise, every possible continuation of the sequence will be equally probable for you. Formulating a hypothesis about what pattern is at work will allow you to predict how the sequence will play', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which interpretative biases are analyzed in this paper?', context='however, it is an open question whether there is an objective norm or not, whether it is attainable and, if so, under what conditions, and whether an agent builds a history for attaining that norm or for some other purpose.Organization of the paper\\tOur paper is organized as follows. Section SECREF2 introduces our model of interpretive bias. Section SECREF3 looks forward towards some consequences of our model for learning and interpretation. We then draw some conclusions in Section SECREF4 . A detailed and formal analysis of interpretive bias has important social implications. Questions of bias are not only timely but also pressing for democracies that are having a difficult time dealing with campaigns of disinformation and a society whose information sources are increasingly fragmented and whose biases are often concealed. Understanding linguistic and cognitive mechanisms for bias precisely and algorithmically can yield valuable tools for navigating in an informationally bewildering world.The model of interpretive bias\\tAs mentioned in Section SECREF1 , understanding interpretive bias requires two ingredients. First, we need to know what it is to interpret a text or to build a history over a set of facts. Our answer comes from analyzing discourse structure and interpretation in SDRT BIBREF2 , BIBREF3 . A history for a text connects its', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which interpretative biases are analyzed in this paper?', context=\"Since the seminal work of Kahneman and Tversky and the economist Allais, psychologists and empirical economists have provided valuable insights into cognitive biases in simple decision problems and simple mathematical tasks BIBREF14 . Some of this work, for example the bias of framing effects BIBREF7 , is directly relevant to our theory of interpretive bias. A situation is presented using certain lexical choices that lead to different “frames”: INLINEFORM0 of the people will live if you do INLINEFORM1 (frame 1) versus INLINEFORM2 of the people will die if you do INLINEFORM3 (frame 2). In fact, INLINEFORM4 , the total population in question; so the two consequents of the conditionals are equivalent. Each frame elaborates or “colors” INLINEFORM5 in a way that affects an interpreter's evaluation of INLINEFORM6 . These frames are in effect short histories whose discourse structure explains their coloring effect. Psychologists, empirical economists and statisticians have also investigated cases of cognitive bias in which subjects deviate from prescriptively rational or independently given objective outcomes in quantitative decision making and frequency estimation, even though they arguably have the goal of seeking an optimal or “true” solution. In a general analysis of interpretive bias like ours,\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which interpretative biases are analyzed in this paper?', context=\"the selection of a history for a set of data, where the data might be a set of nonlinguistic entities or a set of linguistically expressed contents. In particular, we'll look at what people call “unbiased” histories. For us these also involve a bias, what we call a “truth seeking bias”. This is a bias that gets at the truth or acceptably close to it. Our model can show us what such a bias looks like. And we will examine the question of whether it is possible to find such a truth oriented bias for a set of facts, and if so, under what conditions. Can we detect and avoid biases that don't get at the truth but are devised for some other purpose?Our study of interpretive bias relies on three key premises. The first premise is that histories are discursive interpretations of a set of data in the sense that like discourse interpretations, they link together a set of entities with semantically meaningful relations. As such they are amenable to an analysis using the tools used to model a discourse's content and structure. The second is that a bias consists of a purpose or goal that the histories it generates are built to achieve and that agents build histories for many different purposes—to discover the truth or to\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which interpretative biases are analyzed in this paper?', context=\"of our types in interpretive bias. But for us, the interpretive process is already well underway once the model, with its constraints, features and explanatory hypotheses, is posited; at least a partial history, or set of histories, has already been created.The ME model in BIBREF0 not only makes histories dependent on biases but also conditionally updates an agent's bias, the probability distribution, given the interpretation of the conversation or more generally a course of events as it has so far unfolded and crucially as the agent has so far interpreted it. This means that certain biases are reinforced as a history develops, and in turn strengthen the probability of histories generated by such biases in virtue of the types/histories correspondence. We now turn to an analysis of SECREF3 discussed in BIBREF4 , BIBREF0 where arguably this happens.Generalizing from the case study\\tThe Sheehan case study in BIBREF0 shows the interactions of interpretation and probability distributions over types. We'll refer to content that exploit assumptions about types' epistemic content. SECREF3 also offers a case of a self-confirming bias with Jury INLINEFORM0 . But the analysis proposed by BIBREF0 leaves open an important open question about what types are relevant\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How do they measure the diversity of inferences?', context='event background knowledge, then in the finetune stage CWVAE adapts such knowledge to each inference dimension. Experimental results demonstrate that CWVAE outperforms baseline methods in both the accuracy and diversity of generations.Acknowledgments\\tWe thank the anonymous reviewers for their constructive comments, and gratefully acknowledge the support of the National Key Research and Development Program of China (SQ2018AAA010010), the National Key Research and Development Program of China (2018YFB1005103), the National Natural Science Foundation of China (NSFC) via Grant 61702137.', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How do they measure the diversity of inferences?', context='which is particular suitable for evaluating the model performance on one-to-many problem BIBREF20. Further, we employ BLEU score to evaluate the accuracy of generations BIBREF21, and the number of distinct n-gram to evaluate the diversity of generations BIBREF6. The distinct is normalized to $[0, 1]$ by dividing the total number of generated tokens.Experiments ::: Evaluation Metrics ::: Human Evaluation\\tSince automatic evaluation of generations is still a challenging task BIBREF22, we also conduct human evaluations on the model performance. Five human experts are employed to evaluate the coherence, diversity and fluency of generated targets. Experts are asked to vote for if a generation is fluent or coherent for each generated target, and give a 1-5 score for the diversity of generations. For both Event2Mind and Atomic datasets, 100 events are randomly selected from the test set. For each method, top 10 generated targets of each base event are used for evaluation. Finally we report three overall averaged scores of coherence, diversity and fluency on both datasets, respectively.Experiments ::: Overall Results\\tWe list the perplexity and BLEU score of CWVAE and baseline methods on Event2Mind and Atomic in Table TABREF31 and Table', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How do they measure the diversity of inferences?', context='above-mentioned three distributions are assumed to be multivariate Gaussian distribution with a diagonal covariance structure:where $\\\\mu $ denotes the mean of the distribution, $\\\\sigma $ denotes the standard deviation of the distribution, and $I$ denotes the identity matrix.Given $h^x$, $h^y$ and $h^c$, we propose a novel attention-based inferer (ABI) module to estimate the mean and standard deviation of $q_{\\\\phi }(z_{c}|x,c)$, $q_{\\\\phi }(z_{c^{\\\\prime }}|x,y)$ and $q_{\\\\phi }(z|x,y)$:Briefly, through the attention mechanism, ABI can capture the semantic interaction between input sequences, and estimate the parameters of distributions based on it. We will introduce the specific structure of ABI in below.Prior Network Prior Network models $p_{\\\\theta }(z_{c^{\\\\prime }}|x)$ and $p_{\\\\theta }(z|x, z_{c^{\\\\prime }})$ based on $h^x$. The distribution of $p_{\\\\theta }(z_{c^{\\\\prime }}|x)$ and $p_{\\\\theta }(z|x, z_{c^{\\\\prime }})$ are still', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How do they measure the diversity of inferences?', context='TABREF33, respectively, and show the distinct-1 and distinct-2 score on Event2Mind and Atomic in Table TABREF32 and Table TABREF34, respectively. We find that:(1) As shown in Table TABREF32 and Table TABREF34, comparison between RNN-based Seq2Seq and variational-based methods, including Variational Seq2Seq, VRNMT, CWVAE-unpretrained and CWVAE shows that, variational-based methods could increase the diversity of generations. This confirms one of our motivations that variational-based methods could capture the latent semantic distribution within targets and increase the diversity of If-Then reasoning.(2) Comparing CWVAE-unpretrained with other baseline methods shows that, in general CWVAE improves the accuracy and diversity on both dataset. These results indicate the efficiency of CWVAE in capturing the latent semantic distribution of targets, and generate more reasonable inferential results.(3) Comparison between CWVAE and CWVAE-unpretrained shows that the pretrain stage could enhance the performance of CWVAE in both the accuracy and diversity. This is mainly because event knowledge could offer the guidance for If-Then reasoning. In the pretrain stage, CWVAE could capture the event', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How do they measure the diversity of inferences?', context='assumed to be multivariate Gaussian, whereas the parameters are different:where $\\\\mu ^{^{\\\\prime }}$ denotes the mean of the distribution, $\\\\sigma ^{^{\\\\prime }}$ denotes the standard deviation of the distribution and $I$ denotes the identity matrix.Then the attention-based inferer module is still employed to estimate parameters of distributions:Neural Decoder Given the base event $x$, the semantic latent variable $z$, and the context-aware latent variable $z_{c^{\\\\prime }}$, the neural decoder defines the generation probability of $y$ as following:where $p(y_j|y<j, z, z_{c^{\\\\prime }}, x)=g(y_{j-1}, s_{j-1}, e_j)$, $g(\\\\cdot )$ is an attention-based feed forward model, $e_j=\\\\sum _i \\\\alpha _{ji}h_i^{x}$ is the context vector and $s_{j-1}$ is the hidden state of the decoder. We obtain $g(\\\\cdot )$ and $e_j$ the same way as BIBREF12 (BIBREF12). Whereas our decoder differs from BIBREF12 (BIBREF12) in that our model integrates the', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='By how much do they improve the accuracy of inferences over state-of-the-art methods?', context='event background knowledge, then in the finetune stage CWVAE adapts such knowledge to each inference dimension. Experimental results demonstrate that CWVAE outperforms baseline methods in both the accuracy and diversity of generations.Acknowledgments\\tWe thank the anonymous reviewers for their constructive comments, and gratefully acknowledge the support of the National Key Research and Development Program of China (SQ2018AAA010010), the National Key Research and Development Program of China (2018YFB1005103), the National Natural Science Foundation of China (NSFC) via Grant 61702137.', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='By how much do they improve the accuracy of inferences over state-of-the-art methods?', context='which is particular suitable for evaluating the model performance on one-to-many problem BIBREF20. Further, we employ BLEU score to evaluate the accuracy of generations BIBREF21, and the number of distinct n-gram to evaluate the diversity of generations BIBREF6. The distinct is normalized to $[0, 1]$ by dividing the total number of generated tokens.Experiments ::: Evaluation Metrics ::: Human Evaluation\\tSince automatic evaluation of generations is still a challenging task BIBREF22, we also conduct human evaluations on the model performance. Five human experts are employed to evaluate the coherence, diversity and fluency of generated targets. Experts are asked to vote for if a generation is fluent or coherent for each generated target, and give a 1-5 score for the diversity of generations. For both Event2Mind and Atomic datasets, 100 events are randomly selected from the test set. For each method, top 10 generated targets of each base event are used for evaluation. Finally we report three overall averaged scores of coherence, diversity and fluency on both datasets, respectively.Experiments ::: Overall Results\\tWe list the perplexity and BLEU score of CWVAE and baseline methods on Event2Mind and Atomic in Table TABREF31 and Table', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='By how much do they improve the accuracy of inferences over state-of-the-art methods?', context='background knowledge through context-aware latent variable, and such knowledge could be be adapted to our task through the fintune stage.To further evaluate the effectiveness of our proposed approach, we also conduct human evaluations, the results of which are shown in Table TABREF39 and Table TABREF40. On both datasets, CWVAE-based methods achieve consistent better coherence, diversity and fluency performances. While comparing with CWVAE-Unpretrained, the pretrain procedure could improves the performance on coherence and fluency. The main reasons are twofold: first, the CWVAE has advantage in capturing the semantic distribution of targets; second, event background learned from the pretrain stage is helpful for the If-Then reasoning.Experiments ::: Case Study\\tTable TABREF41 provides an example of model generations given the base event “PersonX works tirelessly” and the inference dimension “xIntent”. The generations under CWVAE mainly contain four kinds of semantics: (1) be productive, (2) finish his work soon, (3) accomplish goal, (4) earn more money. While the semantics of generations using baseline RNN-based Seq2Seq model is relatively limited. Furthermore, the first three kinds of semantic overlap the three ground truth targets, and the fourth kind of', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='By how much do they improve the accuracy of inferences over state-of-the-art methods?', context='TABREF33, respectively, and show the distinct-1 and distinct-2 score on Event2Mind and Atomic in Table TABREF32 and Table TABREF34, respectively. We find that:(1) As shown in Table TABREF32 and Table TABREF34, comparison between RNN-based Seq2Seq and variational-based methods, including Variational Seq2Seq, VRNMT, CWVAE-unpretrained and CWVAE shows that, variational-based methods could increase the diversity of generations. This confirms one of our motivations that variational-based methods could capture the latent semantic distribution within targets and increase the diversity of If-Then reasoning.(2) Comparing CWVAE-unpretrained with other baseline methods shows that, in general CWVAE improves the accuracy and diversity on both dataset. These results indicate the efficiency of CWVAE in capturing the latent semantic distribution of targets, and generate more reasonable inferential results.(3) Comparison between CWVAE and CWVAE-unpretrained shows that the pretrain stage could enhance the performance of CWVAE in both the accuracy and diversity. This is mainly because event knowledge could offer the guidance for If-Then reasoning. In the pretrain stage, CWVAE could capture the event', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='By how much do they improve the accuracy of inferences over state-of-the-art methods?', context='Then as illustrated in Figure FIGREF5 (b), $y$ could be generated from $x$ and $z$.CVAE is trained to maximize the conditional likelihood $p(y|x)$, which involves an intractable marginalization over the latent variable $z$. Instead, following BIBREF10 (BIBREF10), a practical way is to introduce another deep network (parameterized by $\\\\phi $) $q_{\\\\phi }(z|x,y)$ to approximate the true posterior distribution $p(z|x,y)$ and maximize the evidence lower bound (ELBO) of the log-likelihood function:Therefore, CVAE is composed of three neural networks in general. We refer to $p_{\\\\theta }(z|x)$ as a prior network, $q_{\\\\phi }(z|x,y)$ as a recognition network, and $p_{\\\\theta }(y|x,z)$ as a neural decoder.Context-aware Variational Autoencoder\\tTraditional CVAE can model the event-target relation. In other words, given an observed event, CVAE can generate its corresponding targets. While in this paper we model the If-Then reasoning as a [(background), event]-target process. It means that in addition to the observed event, we', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which models do they use as baselines on the Atomic dataset?', context='task-specific dataset to adapt the event background information to each specific aspect of If-Then inferential target (e.g., intents, reactions, etc.).Experiments on the Event2Mind and Atomic dataset show that our proposed approach outperforms baseline methods in both the accuracy and diversity of inferences. The code is released at https://github.com/sjcfr/CWVAE.Background\\tBefore specifically describing two dataset —- Event2Mind and Atomic used in this paper as well as the If-Then reasoning task, for clarity, we define the following terminologies:Base event: the prerequisite event in If-Then reasoning, organized as a verb phrase with a predicate and its arguments, such as the event “PersonX finds a new job” shown in Figure FIGREF1.Inference dimension: a particular If-Then reasoning type, e.g., intents, effects of the base event. Details are shown in Table TABREF2 and Table TABREF3.Target: the inferential results. For example, as shown in Figure FIGREF1, given a base event “PersonX finds a new job” and one inference dimension “xReact”, the targets could be “relieved” or “needy”. Notice that each inference dimension can have multiple', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which models do they use as baselines on the Atomic dataset?', context='which is particular suitable for evaluating the model performance on one-to-many problem BIBREF20. Further, we employ BLEU score to evaluate the accuracy of generations BIBREF21, and the number of distinct n-gram to evaluate the diversity of generations BIBREF6. The distinct is normalized to $[0, 1]$ by dividing the total number of generated tokens.Experiments ::: Evaluation Metrics ::: Human Evaluation\\tSince automatic evaluation of generations is still a challenging task BIBREF22, we also conduct human evaluations on the model performance. Five human experts are employed to evaluate the coherence, diversity and fluency of generated targets. Experts are asked to vote for if a generation is fluent or coherent for each generated target, and give a 1-5 score for the diversity of generations. For both Event2Mind and Atomic datasets, 100 events are randomly selected from the test set. For each method, top 10 generated targets of each base event are used for evaluation. Finally we report three overall averaged scores of coherence, diversity and fluency on both datasets, respectively.Experiments ::: Overall Results\\tWe list the perplexity and BLEU score of CWVAE and baseline methods on Event2Mind and Atomic in Table TABREF31 and Table', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which models do they use as baselines on the Atomic dataset?', context='targets.Event2Mind Dataset contains 25K base events and 300K targets, annotated through crowdsourcing. Event2Mind is organized in a hierarchical form: each base event has three types of inference dimensions, and given a base event, under one of inference dimensions, several targets may simultaneously exist. Table TABREF2 shows the (base event-inference dimension-target) hierarchical structure through an example from Event2Mind.Atomic Dataset Inspired by Event2Mind, the Atomic dataset shares the same hierarchical structure as Event2Mind, while scales up the size of dataset and expands the scope to nine types of inference dimensions. Table TABREF3 shows the (base event-inference dimension-target) hierarchical structure through an example from Atomic. Though Atomic covers the inference dimensions of Event2Mind, the base event collection of Event2Mind is nonidentical to that of Atomic.Problem Definition The If-Then reasoning task could be formally defined as a conditional one-to-many generation problem: given a base event $x$ and one inference dimension $d$, the model is required to generate targets $y=f(x, d)$ as close to the ground truths as possible. Both $x$ and $y$ consist of sequence of words: $x=\\\\lbrace x_1,\\\\dots ,', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which models do they use as baselines on the Atomic dataset?', context='TABREF33, respectively, and show the distinct-1 and distinct-2 score on Event2Mind and Atomic in Table TABREF32 and Table TABREF34, respectively. We find that:(1) As shown in Table TABREF32 and Table TABREF34, comparison between RNN-based Seq2Seq and variational-based methods, including Variational Seq2Seq, VRNMT, CWVAE-unpretrained and CWVAE shows that, variational-based methods could increase the diversity of generations. This confirms one of our motivations that variational-based methods could capture the latent semantic distribution within targets and increase the diversity of If-Then reasoning.(2) Comparing CWVAE-unpretrained with other baseline methods shows that, in general CWVAE improves the accuracy and diversity on both dataset. These results indicate the efficiency of CWVAE in capturing the latent semantic distribution of targets, and generate more reasonable inferential results.(3) Comparison between CWVAE and CWVAE-unpretrained shows that the pretrain stage could enhance the performance of CWVAE in both the accuracy and diversity. This is mainly because event knowledge could offer the guidance for If-Then reasoning. In the pretrain stage, CWVAE could capture the event', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which models do they use as baselines on the Atomic dataset?', context='Then as illustrated in Figure FIGREF5 (b), $y$ could be generated from $x$ and $z$.CVAE is trained to maximize the conditional likelihood $p(y|x)$, which involves an intractable marginalization over the latent variable $z$. Instead, following BIBREF10 (BIBREF10), a practical way is to introduce another deep network (parameterized by $\\\\phi $) $q_{\\\\phi }(z|x,y)$ to approximate the true posterior distribution $p(z|x,y)$ and maximize the evidence lower bound (ELBO) of the log-likelihood function:Therefore, CVAE is composed of three neural networks in general. We refer to $p_{\\\\theta }(z|x)$ as a prior network, $q_{\\\\phi }(z|x,y)$ as a recognition network, and $p_{\\\\theta }(y|x,z)$ as a neural decoder.Context-aware Variational Autoencoder\\tTraditional CVAE can model the event-target relation. In other words, given an observed event, CVAE can generate its corresponding targets. While in this paper we model the If-Then reasoning as a [(background), event]-target process. It means that in addition to the observed event, we', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How does the context-aware variational autoencoder learn event background information?', context='also want to involve the event background knowledge (which can be learned from event contexts) to generate the reasonable targets.To this end, we propose a context-aware variational autoencoder (CWVAE), with two additional latent variables: a context-acquiring latent variable $z_c$ to directly acquire context information, and a context-aware latent variable $z_{c^{\\\\prime }}$ to learn background knowledge from $z_c$, as shown in Figure FIGREF6 (a). However, the event context information is absent in the Event2Mind and Atomic dataset. To learn from the external event context information, we design the following two-stage training procedure for CWVAE.Pretrain: Learning Event Background Knowledge from Auxiliary Dataset In the pretrain stage, CWVAE is trained on three narrative story corpora with rich event context information. As shown in Figure FIGREF6 (a), context-acquiring latent variable $z_c$ is directly conditioned on the context $c$. Hence, $z_c$ could be employed for acquiring background knowledge from event contexts. Then, we minimize the distance between $z_c$ and the context-aware latent variable $z_{c^{\\\\prime }}$, by which the event background knowledge is transferred from $z_c$ to', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How does the context-aware variational autoencoder learn event background information?', context='generic responses, rather than meaningful and specific answers BIBREF6, BIBREF7.Second, as a commonsense reasoning problem, rich background knowledge is necessary for generating reasonable inferences. For example, as shown in Figure FIGREF1, the feeling of PersonX upon the event “PersonX finds a new job” could be multiple. However, after given a context “PersonX was fired”, the plausible inferences would be narrowed down to “needy” or “stressed out”.To better solve these problems, we propose a context-aware variational autoencoder (CWVAE) together with a two-stage training procedure. Variational Autoencoder (VAE) based models have shown great potential in modeling the one-to-many problem and generate diversified inferences BIBREF8, BIBREF9.In addition to the traditional VAE structure, we introduces an extra context-aware latent variable in CWVAE to learn the event background knowledge. In the pretrain stage, CWVAE is trained on an auxiliary dataset (consists of three narrative story corpora and contains rich event background knowledge), to learn the event background information by using the context-aware latent variable. Subsequently, in the finetune stage, CWVAE is trained on the', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How does the context-aware variational autoencoder learn event background information?', context='Modeling Event Background for If-Then Commonsense Reasoning Using Context-aware Variational Autoencoder\\tUnderstanding event and event-centered commonsense reasoning are crucial for natural language processing (NLP). Given an observed event, it is trivial for human to infer its intents and effects, while this type of If-Then reasoning still remains challenging for NLP systems. To facilitate this, a If-Then commonsense reasoning dataset Atomic is proposed, together with an RNN-based Seq2Seq model to conduct such reasoning. However, two fundamental problems still need to be addressed: first, the intents of an event may be multiple, while the generations of RNN-based Seq2Seq models are always semantically close; second, external knowledge of the event background may be necessary for understanding events and conducting the If-Then reasoning. To address these issues, we propose a novel context-aware variational autoencoder effectively learning event background information to guide the If-Then reasoning. Experimental results show that our approach improves the accuracy and diversity of inferences compared with state-of-the-art baseline methods.\\tIntroduction\\tRecently, event-centered commonsense knowledge has attracted much attention BIBREF0, BIBREF1, BIBREF2, BIBREF3, because of understanding events is', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How does the context-aware variational autoencoder learn event background information?', context='CVAE, the ELBO of CWVAE is defined as follows:which is the objective function at the finetune stage.While in the pretrain stage, as we aim to learn background knowledge through minimizing the distance between $z_c$ and $z_{c^{\\\\prime }}$, in addition to $L^{ELBO}$, a context-aware regulation term is introduced:where the context aware regularization term is the KL distance between $z$ and $z_{c^{\\\\prime }}$. Through minimizing the context aware regularization term, we aim to pass event context knowledge from $z_c$ to the context aware latent variable $z_{c^{\\\\prime }}$.Context-aware Variational Autoencoder ::: Training Details\\tTo test the performance of CWVAE, we split the Event2Mind and Atomic dataset into training, development and test sets (80%, 10%, 10%) in the same way as BIBREF5 (BIBREF5) and BIBREF4 (BIBREF4), respectively. We initialize the embedding layer from 300d GloVe word embeddings. The neural encoder is chosen to be biGRU with 300 hidden units. For the ABI module, size of $W_a$ and', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How does the context-aware variational autoencoder learn event background information?', context='Variational AutoEncoder-Decoder Based Natural Language Generation\\tVAE BIBREF10 has been widely applied in various of text generation tasks, such as dialogue and machine translation. In dialogue generation, BIBREF9 (BIBREF9) adapts VAE with encoder-decoder framework to model the latent semantic distribution of answers, which can increase the diversity of generations. For the task of machine translation, BIBREF19 (BIBREF19) and BIBREF28 (BIBREF28) employ a latent variable to capture the semantic interaction between the source and target sentence, and regard the latent variable as a supplementation of attention mechanism. While BIBREF29 (BIBREF29) use the latent variable to model topic distributions in text generation. In this paper, we introduce an additional context-aware latent variable to effectively learn background knowledge and conduct If-Then reasoning on the guidance of it.Conclusion\\tIn this paper, we propose a novel context-aware VAE (CWVAE) framework with two training stages for If-Then commonsense reasoning. By introducing an additional context-aware latent variable, CWVAE is able to learn external background knowledge, and conduct If-Then reasoning under its guidance. In the pretrain stage, CWVAE learns', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What is the size of the Atomic dataset?', context='targets.Event2Mind Dataset contains 25K base events and 300K targets, annotated through crowdsourcing. Event2Mind is organized in a hierarchical form: each base event has three types of inference dimensions, and given a base event, under one of inference dimensions, several targets may simultaneously exist. Table TABREF2 shows the (base event-inference dimension-target) hierarchical structure through an example from Event2Mind.Atomic Dataset Inspired by Event2Mind, the Atomic dataset shares the same hierarchical structure as Event2Mind, while scales up the size of dataset and expands the scope to nine types of inference dimensions. Table TABREF3 shows the (base event-inference dimension-target) hierarchical structure through an example from Atomic. Though Atomic covers the inference dimensions of Event2Mind, the base event collection of Event2Mind is nonidentical to that of Atomic.Problem Definition The If-Then reasoning task could be formally defined as a conditional one-to-many generation problem: given a base event $x$ and one inference dimension $d$, the model is required to generate targets $y=f(x, d)$ as close to the ground truths as possible. Both $x$ and $y$ consist of sequence of words: $x=\\\\lbrace x_1,\\\\dots ,', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What is the size of the Atomic dataset?', context='task-specific dataset to adapt the event background information to each specific aspect of If-Then inferential target (e.g., intents, reactions, etc.).Experiments on the Event2Mind and Atomic dataset show that our proposed approach outperforms baseline methods in both the accuracy and diversity of inferences. The code is released at https://github.com/sjcfr/CWVAE.Background\\tBefore specifically describing two dataset —- Event2Mind and Atomic used in this paper as well as the If-Then reasoning task, for clarity, we define the following terminologies:Base event: the prerequisite event in If-Then reasoning, organized as a verb phrase with a predicate and its arguments, such as the event “PersonX finds a new job” shown in Figure FIGREF1.Inference dimension: a particular If-Then reasoning type, e.g., intents, effects of the base event. Details are shown in Table TABREF2 and Table TABREF3.Target: the inferential results. For example, as shown in Figure FIGREF1, given a base event “PersonX finds a new job” and one inference dimension “xReact”, the targets could be “relieved” or “needy”. Notice that each inference dimension can have multiple', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What is the size of the Atomic dataset?', context='$W_b$ is set to be $100 \\\\times d_a$ and $100 \\\\times d_b$ respectively. The dimension of $z_c$, $z_{c^{\\\\prime }}$ and $z$ is all set as 40. The neural decoder is set to be GRU with 300d hidden state. Regulation coefficient $\\\\lambda $ of context-aware regulation term is set to be 0.1. Models are trained using an Adam optimizer BIBREF15 with a learning rate of 0.001.Experiments ::: Auxiliary Dataset\\tThe auxiliary dataset is built upon three human-written story corpora: ROCStories BIBREF16, VIST BIBREF17 and WritingPrompts BIBREF18. ROCStories and VIST are composed of short stories with five sentences. We filter out stories of more than 1,000 words in WritingPrompts, and cut the remaining stories into five-sentence-paragraphs.For each five-sentence-paragraph, we define the first three sentences as contexts of the base event, the fourth sentence as the base event, and the fifth sentence as the inference target. For example, as shown in Table TABREF25, the first three sentences describe a context that Jason was unsatisfied about his job', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='What is the size of the Atomic dataset?', context='which is particular suitable for evaluating the model performance on one-to-many problem BIBREF20. Further, we employ BLEU score to evaluate the accuracy of generations BIBREF21, and the number of distinct n-gram to evaluate the diversity of generations BIBREF6. The distinct is normalized to $[0, 1]$ by dividing the total number of generated tokens.Experiments ::: Evaluation Metrics ::: Human Evaluation\\tSince automatic evaluation of generations is still a challenging task BIBREF22, we also conduct human evaluations on the model performance. Five human experts are employed to evaluate the coherence, diversity and fluency of generated targets. Experts are asked to vote for if a generation is fluent or coherent for each generated target, and give a 1-5 score for the diversity of generations. For both Event2Mind and Atomic datasets, 100 events are randomly selected from the test set. For each method, top 10 generated targets of each base event are used for evaluation. Finally we report three overall averaged scores of coherence, diversity and fluency on both datasets, respectively.Experiments ::: Overall Results\\tWe list the perplexity and BLEU score of CWVAE and baseline methods on Event2Mind and Atomic in Table TABREF31 and Table', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What is the size of the Atomic dataset?', context='above-mentioned three distributions are assumed to be multivariate Gaussian distribution with a diagonal covariance structure:where $\\\\mu $ denotes the mean of the distribution, $\\\\sigma $ denotes the standard deviation of the distribution, and $I$ denotes the identity matrix.Given $h^x$, $h^y$ and $h^c$, we propose a novel attention-based inferer (ABI) module to estimate the mean and standard deviation of $q_{\\\\phi }(z_{c}|x,c)$, $q_{\\\\phi }(z_{c^{\\\\prime }}|x,y)$ and $q_{\\\\phi }(z|x,y)$:Briefly, through the attention mechanism, ABI can capture the semantic interaction between input sequences, and estimate the parameters of distributions based on it. We will introduce the specific structure of ABI in below.Prior Network Prior Network models $p_{\\\\theta }(z_{c^{\\\\prime }}|x)$ and $p_{\\\\theta }(z|x, z_{c^{\\\\prime }})$ based on $h^x$. The distribution of $p_{\\\\theta }(z_{c^{\\\\prime }}|x)$ and $p_{\\\\theta }(z|x, z_{c^{\\\\prime }})$ are still', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which coreference resolution systems are tested?', context='Gender Bias in Coreference Resolution\\tWe present an empirical study of gender bias in coreference resolution systems. We first introduce a novel, Winograd schema-style set of minimal pair sentences that differ only by pronoun gender. With these\"Winogender schemas,\"we evaluate and confirm systematic gender bias in three publicly-available coreference resolution systems, and correlate this bias with real-world and textual gender statistics.\\tIntroduction\\tThere is a classic riddle: A man and his son get into a terrible car crash. The father dies, and the boy is badly injured. In the hospital, the surgeon looks at the patient and exclaims, “I can\\'t operate on this boy, he\\'s my son!” How can this be?That a majority of people are reportedly unable to solve this riddle is taken as evidence of underlying implicit gender bias BIBREF0 : many first-time listeners have difficulty assigning both the role of “mother” and “surgeon” to the same entity.As the riddle reveals, the task of coreference resolution in English is tightly bound with questions of gender, for humans and automated systems alike (see Figure 1 ). As awareness grows of the ways in which data-driven AI technologies may acquire and amplify human-like biases BIBREF1', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which coreference resolution systems are tested?', context=\", BIBREF2 , BIBREF3 , this work investigates how gender biases manifest in coreference resolution systems.There are many ways one could approach this question; here we focus on gender bias with respect to occupations, for which we have corresponding U.S. employment statistics. Our approach is to construct a challenge dataset in the style of Winograd schemas, wherein a pronoun must be resolved to one of two previously-mentioned entities in a sentence designed to be easy for humans to interpret, but challenging for data-driven systems BIBREF4 . In our setting, one of these mentions is a person referred to by their occupation; by varying only the pronoun's gender, we are able to test the impact of gender on resolution. With these “Winogender schemas,” we demonstrate the presence of systematic gender bias in multiple publicly-available coreference resolution systems, and that occupation-specific bias is correlated with employment statistics. We release these test sentences to the public.In our experiments, we represent gender as a categorical variable with either two or three possible values: female, male, and (in some cases) neutral. These choices reflect limitations of the textual and real-world datasets we use.Coreference Systems\\tIn this work, we evaluate three publicly-available off-the-shelf coreference resolution\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which coreference resolution systems are tested?', context='$ 2 sentence templates per occupation $\\\\times $ 2 participants $\\\\times $ 3 pronoun genders.Results and Discussion\\tWe evaluate examples of each of the three coreference system architectures described in \"Coreference Systems\" : the BIBREF5 sieve system from the rule-based paradigm (referred to as RULE), BIBREF6 from the statistical paradigm (STAT), and the BIBREF11 deep reinforcement system from the neural paradigm (NEURAL).By multiple measures, the Winogender schemas reveal varying degrees of gender bias in all three systems. First we observe that these systems do not behave in a gender-neutral fashion. That is to say, we have designed test sentences where correct pronoun resolution is not a function of gender (as validated by human annotators), but system predictions do exhibit sensitivity to pronoun gender: 68% of male-female minimal pair test sentences are resolved differently by the RULE system; 28% for STAT; and 13% for NEURAL.Overall, male pronouns are also more likely to be resolved as occupation than female or neutral pronouns across all systems: for RULE, 72% male vs 29% female and 1% neutral; for STAT, 71% male vs 63% female', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which coreference resolution systems are tested?', context='systems, representing three different machine learning paradigms: rule-based systems, feature-driven statistical systems, and neural systems.Winogender Schemas\\tOur intent is to reveal cases where coreference systems may be more or less likely to recognize a pronoun as coreferent with a particular occupation based on pronoun gender, as observed in Figure 1 . To this end, we create a specialized evaluation set consisting of 120 hand-written sentence templates, in the style of the Winograd Schemas BIBREF4 . Each sentence contains three referring expressions of interest:We use a list of 60 one-word occupations obtained from Caliskan183 (see supplement), with corresponding gender percentages available from the U.S. Bureau of Labor Statistics. For each occupation, we wrote two similar sentence templates: one in which pronoun is coreferent with occupation, and one in which it is coreferent with participant (see Figure 2 ). For each sentence template, there are three pronoun instantiations (female, male, or neutral), and two participant instantiations (a specific participant, e.g., “the passenger,” and a generic paricipant, “someone.”) With the templates fully instantiated, the evaluation set contains 720 sentences: 60 occupations $\\\\times', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Which coreference resolution systems are tested?', context='with extensive analysis of observed system bias, revealing its correlation with biases present in real-world and textual statistics; by contrast, zhao-wang:2018:N18-1 present methods of debiasing existing systems, showing that simple approaches such as augmenting training data with gender-swapped examples or directly editing noun phrase counts in the B&L resource are effective at reducing system bias, as measured by the schemas. Complementary differences exist between the two schema formulations: Winogender schemas (this work) include gender-neutral pronouns, are syntactically diverse, and are human-validated; WinoBias includes (and delineates) sentences resolvable from syntax alone; a Winogender schema has one occupational mention and one “other participant” mention; WinoBias has two occupational mentions. Due to these differences, we encourage future evaluations to make use of both datasets.Conclusion and Future Work\\tWe have introduced “Winogender schemas,” a pronoun resolution task in the style of Winograd schemas that enables us to uncover gender bias in coreference resolution systems. We evaluate three publicly-available, off-the-shelf systems and find systematic gender bias in each: for many occupations, systems strongly prefer to resolve pronouns of one gender over another. We demonstrate', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='At what text unit/level were documents processed?', context='extracted start and end dates. Note that these post processing steps were not applied in the evaluation presented in the previous section, and so the figures reported in Tables TABREF6 and TABREF7 actually under-report the accuracy of our models in a real-world setting.Conclusions\\tThis work tackles the challenge of content extraction from two types of business documents, regulatory filings and property lease agreements. The problem is straightforwardly formulated as a sequence labeling task, and we fine-tune BERT for this application. We show that our simple models can achieve reasonable accuracy with only modest amounts of training data, illustrating the power and flexibility of modern NLP models. Our cloud platform pulls these models together in an easy-to-use interface for addressing real-world business needs.', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='At what text unit/level were documents processed?', context='Evaluation\\tAt inference time, documents from the test set are segmented into paragraphs and fed into the fine-tuned BERT model one at a time. Typically, sequence labeling tasks are evaluated in terms of precision, recall, and F$_1$ at the entity level, per sentence. However, such an evaluation is inappropriate for our task because the content elements represent properties of the entire document as a whole, not individual sentences.Instead, we adopted the following evaluation procedure: For each content element type (e.g., “tenant”), we extract all tagged spans from the document, and after deduplication, treat the entities as a set that we then measure against the ground truth in terms of precision, recall, and F$_1$. We do this because there may be multiple ground truth entities and BERT may mark multiple spans in a document with a particular entity type. Note that the metrics are based on exact matches—this means that, for example, if the extracted entity has an extraneous token compared to a ground truth entity, the system receives no credit.Results\\tOur main results are presented in Table TABREF6 on the test set of the regulatory filings and in Table TABREF7 on the test set of the property lease agreements; F$_1$, precision,', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='At what text unit/level were documents processed?', context='FIGREF12. We have invested substantial effort in making the interface as easy to use as possible; for example, annotating content elements is as easy as selecting text from the document. Our platform is able to ingest documents in a variety of formats, including PDFs and Microsoft Word, and converts these formats into plain text before presenting them to the annotators.The second feature of the platform is the ability for users to upload new documents and apply inference on them using a fine-tuned BERT model; a screenshot of this feature is shown in Figure FIGREF13. The relevant content elements are highlighted in the document.On the cloud platform, the inference module also applies a few simple rule-based modifications to post-process BERT extraction results. For any of the extracted dates, we further applied a date parser based on rules and regular expressions to normalize and canonicalize the extracted outputs. In the regulatory filings, we tried to normalize numbers that were written in a mixture of Arabic numerals and Chinese units (e.g., “UTF8gbsn亿”, the unit for $10^8$) and discarded partial results if simple rule-based rewrites were not successful. In the property lease agreements, the contract length, if not directly extracted by BERT, is computed from the', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='At what text unit/level were documents processed?', context='and recall are computed in the manner described above. We show metrics across all content elements (micro-averaged) as well as broken down by types. For the property lease agreements, we show results on all documents (left) and only over those with unseen templates (right). Examining these results, we see that although there is some degradation in effectiveness between all documents and only unseen templates, it appears that BERT is able to generalize to previously-unseen expressions of the content elements. Specifically, it is not the case that the model is simply memorizing fixed patterns or key phrases—otherwise, we could just craft a bunch of regular expression patterns for this task. This is a nice result that shows off the power of modern neural NLP models.Overall, we would characterize our models as achieving reasonable accuracy, comparable to extraction tasks in more “traditional” domains, with modest amounts of training data. It does appear that with fine tuning, BERT is able to adapt to the linguistic characteristics of these specialized types of documents. For example, the regulatory filings have quite specialized vocabulary and the property lease agreements have numeric heading structures—BERT does not seem to be confused by these elements, which for the most part do not appear in the texts that the model was pre-trained', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='At what text unit/level were documents processed?', context='Rapid Adaptation of BERT for Information Extraction on Domain-Specific Business Documents\\tTechniques for automatically extracting important content elements from business documents such as contracts, statements, and filings have the potential to make business operations more efficient. This problem can be formulated as a sequence labeling task, and we demonstrate the adaption of BERT to two types of business documents: regulatory filings and property lease agreements. There are aspects of this problem that make it easier than \"standard\" information extraction tasks and other aspects that make it more difficult, but on balance we find that modest amounts of annotated data (less than 100 documents) are sufficient to achieve reasonable accuracy. We integrate our models into an end-to-end cloud platform that provides both an easy-to-use annotation interface as well as an inference interface that allows users to upload documents and inspect model outputs.\\tIntroduction\\tBusiness documents broadly characterize a large class of documents that are central to the operation of business. These include legal contracts, purchase orders, financial statements, regulatory filings, and more. Such documents have a number of characteristics that set them apart from the types of texts that most NLP techniques today are designed to process (Wikipedia articles, news stories, web pages, etc.): They are heterogeneous and frequently contain a mix', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What evaluation metric were used for presenting results? ', context='Evaluation\\tAt inference time, documents from the test set are segmented into paragraphs and fed into the fine-tuned BERT model one at a time. Typically, sequence labeling tasks are evaluated in terms of precision, recall, and F$_1$ at the entity level, per sentence. However, such an evaluation is inappropriate for our task because the content elements represent properties of the entire document as a whole, not individual sentences.Instead, we adopted the following evaluation procedure: For each content element type (e.g., “tenant”), we extract all tagged spans from the document, and after deduplication, treat the entities as a set that we then measure against the ground truth in terms of precision, recall, and F$_1$. We do this because there may be multiple ground truth entities and BERT may mark multiple spans in a document with a particular entity type. Note that the metrics are based on exact matches—this means that, for example, if the extracted entity has an extraneous token compared to a ground truth entity, the system receives no credit.Results\\tOur main results are presented in Table TABREF6 on the test set of the regulatory filings and in Table TABREF7 on the test set of the property lease agreements; F$_1$, precision,', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What evaluation metric were used for presenting results? ', context='F$_1$ scores, but we observe similar trends for the other metrics. For both document types, it seems like 60–90 documents are sufficient to achieve F$_1$ on par with using all available training data. Beyond this point, we hit rapidly diminishing returns. For a number of “easy” content elements (e.g., dates in the property lease agreements), it seems like 30 documents are sufficient to achieve good accuracy, and more does not appear to yield substantial improvements. Note that in a few cases, training on more data actually decreases F$_1$ slightly, but this can be attributed to noise in the sampling process.Finally, in Table TABREF8 we show an excerpt from each type of document along with the content elements that are extracted by our BERT models. We provide both the original source Chinese texts as well as English translations to provide the reader with a general sense of the source documents and how well our models behave.Cloud Platform\\tAll the capabilities described in this paper come together in an end-to-end cloud-based platform that we have built. The platform has two main features: First, it provides an annotation interface that allows users to define content elements, upload documents, and annotate documents; a screenshot is shown in Figure', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What evaluation metric were used for presenting results? ', context='and recall are computed in the manner described above. We show metrics across all content elements (micro-averaged) as well as broken down by types. For the property lease agreements, we show results on all documents (left) and only over those with unseen templates (right). Examining these results, we see that although there is some degradation in effectiveness between all documents and only unseen templates, it appears that BERT is able to generalize to previously-unseen expressions of the content elements. Specifically, it is not the case that the model is simply memorizing fixed patterns or key phrases—otherwise, we could just craft a bunch of regular expression patterns for this task. This is a nice result that shows off the power of modern neural NLP models.Overall, we would characterize our models as achieving reasonable accuracy, comparable to extraction tasks in more “traditional” domains, with modest amounts of training data. It does appear that with fine tuning, BERT is able to adapt to the linguistic characteristics of these specialized types of documents. For example, the regulatory filings have quite specialized vocabulary and the property lease agreements have numeric heading structures—BERT does not seem to be confused by these elements, which for the most part do not appear in the texts that the model was pre-trained', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What evaluation metric were used for presenting results? ', context='did not exploit this observation; for example, we made no explicit attempt to induce template structure or apply clustering—although such techniques would likely improve extraction accuracy. In total, we collected and manually annotated 150 filings, which were divided into training, validation, and test sets with a 6:2:2 split. Our test corpus comprises 30 regulatory filings. Table TABREF6 enumerates the seven content elements that we extract.Property Lease Agreements. These contracts mostly follow a fixed “schema” with a certain number of prescribed elements (leaseholder, tenant, rent, deposit, etc.); Table TABREF7 enumerates the eight elements that our model extracts. Since most property lease agreements are confidential, no public corpus for research exists, and thus we had to build our own. To this end, we searched the web for publicly-available templates of property lease agreements and found 115 templates in total. For each template, we manually generated one, two, or three instances, using a fake data generator tool to fill in the missing content elements such as addresses. In total, we created (and annotated) 223 contracts by hand. This corpus was further split into training, validation, and test data with a 6:2:2 split. Our test', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What evaluation metric were used for presenting results? ', context='extracted start and end dates. Note that these post processing steps were not applied in the evaluation presented in the previous section, and so the figures reported in Tables TABREF6 and TABREF7 actually under-report the accuracy of our models in a real-world setting.Conclusions\\tThis work tackles the challenge of content extraction from two types of business documents, regulatory filings and property lease agreements. The problem is straightforwardly formulated as a sequence labeling task, and we fine-tune BERT for this application. We show that our simple models can achieve reasonable accuracy with only modest amounts of training data, illustrating the power and flexibility of modern NLP models. Our cloud platform pulls these models together in an easy-to-use interface for addressing real-world business needs.', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Was the structure of regulatory filings exploited when training the model? ', context='did not exploit this observation; for example, we made no explicit attempt to induce template structure or apply clustering—although such techniques would likely improve extraction accuracy. In total, we collected and manually annotated 150 filings, which were divided into training, validation, and test sets with a 6:2:2 split. Our test corpus comprises 30 regulatory filings. Table TABREF6 enumerates the seven content elements that we extract.Property Lease Agreements. These contracts mostly follow a fixed “schema” with a certain number of prescribed elements (leaseholder, tenant, rent, deposit, etc.); Table TABREF7 enumerates the eight elements that our model extracts. Since most property lease agreements are confidential, no public corpus for research exists, and thus we had to build our own. To this end, we searched the web for publicly-available templates of property lease agreements and found 115 templates in total. For each template, we manually generated one, two, or three instances, using a fake data generator tool to fill in the missing content elements such as addresses. In total, we created (and annotated) 223 contracts by hand. This corpus was further split into training, validation, and test data with a 6:2:2 split. Our test', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Was the structure of regulatory filings exploited when training the model? ', context=\"on. Naturally, accuracy varies across different content elements: For the rental agreements, entities such as leaseholder, tenant, start date, and end date perform much better than others. For the regulatory filing, the model performs well on all content elements except for one; there were very few examples of “% of pledged shares in the shareholder's total share holdings” in our training data, and thus accuracy is very low despite the fact that percentages are straightforward to identify. It seems that “easy” entities often have more fixed forms and are quite close to entities that the model may have encountered during pre-training (e.g., names and dates). In contrast, “difficult” elements are often domain-specific and widely vary in their forms.How data efficient is BERT when fine tuning on annotated data? We can answer this question by varying the amount of training data used to fine tune the BERT models, holding everything else constant. These results are shown in Figure FIGREF10 for the regulatory filings (30, 60, 90 randomly-selected documents) and in Figure FIGREF11 for the property lease agreements (30, 60, 90, 120 randomly-selected documents); in all cases, the development set is fixed. For brevity, we only show\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Was the structure of regulatory filings exploited when training the model? ', context='and recall are computed in the manner described above. We show metrics across all content elements (micro-averaged) as well as broken down by types. For the property lease agreements, we show results on all documents (left) and only over those with unseen templates (right). Examining these results, we see that although there is some degradation in effectiveness between all documents and only unseen templates, it appears that BERT is able to generalize to previously-unseen expressions of the content elements. Specifically, it is not the case that the model is simply memorizing fixed patterns or key phrases—otherwise, we could just craft a bunch of regular expression patterns for this task. This is a nice result that shows off the power of modern neural NLP models.Overall, we would characterize our models as achieving reasonable accuracy, comparable to extraction tasks in more “traditional” domains, with modest amounts of training data. It does appear that with fine tuning, BERT is able to adapt to the linguistic characteristics of these specialized types of documents. For example, the regulatory filings have quite specialized vocabulary and the property lease agreements have numeric heading structures—BERT does not seem to be confused by these elements, which for the most part do not appear in the texts that the model was pre-trained', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Was the structure of regulatory filings exploited when training the model? ', context='twofold: From the scientific perspective, we begin to provide some answers to the above questions. With two case studies, we find that a modest amount of domain-specific annotated data (less than 100 documents) is sufficient to fine-tune BERT to achieve reasonable accuracy in extracting a set of content elements. From a practical perspective, we showcase our efforts in an end-to-end cloud platform that provides an easy-to-use annotation interface as well as an inference interface that allows users to upload documents and inspect the results of our models.Approach\\tWithin the broad space of business documents, we have decided to focus on two specific types: regulatory filings and property lease agreements. While our approach is not language specific, all our work is conducted on Chinese documents. In this section, we first describe these documents and our corpora, our sequence labeling model, and finally our evaluation approach.Approach ::: Datasets\\tRegulatory Filings. We focused on a specific type of filing: disclosures of pledges by shareholders when their shares are offered up for collateral. These are publicly accessible and were gathered from the database of a stock exchange in China. We observe that most of these announcements are fairly formulaic, likely generated by templates. However, we treated them all as natural language text and', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Was the structure of regulatory filings exploited when training the model? ', context='extracted start and end dates. Note that these post processing steps were not applied in the evaluation presented in the previous section, and so the figures reported in Tables TABREF6 and TABREF7 actually under-report the accuracy of our models in a real-world setting.Conclusions\\tThis work tackles the challenge of content extraction from two types of business documents, regulatory filings and property lease agreements. The problem is straightforwardly formulated as a sequence labeling task, and we fine-tune BERT for this application. We show that our simple models can achieve reasonable accuracy with only modest amounts of training data, illustrating the power and flexibility of modern NLP models. Our cloud platform pulls these models together in an easy-to-use interface for addressing real-world business needs.', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What type of documents are supported by the annotation platform?', context='Rapid Adaptation of BERT for Information Extraction on Domain-Specific Business Documents\\tTechniques for automatically extracting important content elements from business documents such as contracts, statements, and filings have the potential to make business operations more efficient. This problem can be formulated as a sequence labeling task, and we demonstrate the adaption of BERT to two types of business documents: regulatory filings and property lease agreements. There are aspects of this problem that make it easier than \"standard\" information extraction tasks and other aspects that make it more difficult, but on balance we find that modest amounts of annotated data (less than 100 documents) are sufficient to achieve reasonable accuracy. We integrate our models into an end-to-end cloud platform that provides both an easy-to-use annotation interface as well as an inference interface that allows users to upload documents and inspect model outputs.\\tIntroduction\\tBusiness documents broadly characterize a large class of documents that are central to the operation of business. These include legal contracts, purchase orders, financial statements, regulatory filings, and more. Such documents have a number of characteristics that set them apart from the types of texts that most NLP techniques today are designed to process (Wikipedia articles, news stories, web pages, etc.): They are heterogeneous and frequently contain a mix', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What type of documents are supported by the annotation platform?', context='twofold: From the scientific perspective, we begin to provide some answers to the above questions. With two case studies, we find that a modest amount of domain-specific annotated data (less than 100 documents) is sufficient to fine-tune BERT to achieve reasonable accuracy in extracting a set of content elements. From a practical perspective, we showcase our efforts in an end-to-end cloud platform that provides an easy-to-use annotation interface as well as an inference interface that allows users to upload documents and inspect the results of our models.Approach\\tWithin the broad space of business documents, we have decided to focus on two specific types: regulatory filings and property lease agreements. While our approach is not language specific, all our work is conducted on Chinese documents. In this section, we first describe these documents and our corpora, our sequence labeling model, and finally our evaluation approach.Approach ::: Datasets\\tRegulatory Filings. We focused on a specific type of filing: disclosures of pledges by shareholders when their shares are offered up for collateral. These are publicly accessible and were gathered from the database of a stock exchange in China. We observe that most of these announcements are fairly formulaic, likely generated by templates. However, we treated them all as natural language text and', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What type of documents are supported by the annotation platform?', context='F$_1$ scores, but we observe similar trends for the other metrics. For both document types, it seems like 60–90 documents are sufficient to achieve F$_1$ on par with using all available training data. Beyond this point, we hit rapidly diminishing returns. For a number of “easy” content elements (e.g., dates in the property lease agreements), it seems like 30 documents are sufficient to achieve good accuracy, and more does not appear to yield substantial improvements. Note that in a few cases, training on more data actually decreases F$_1$ slightly, but this can be attributed to noise in the sampling process.Finally, in Table TABREF8 we show an excerpt from each type of document along with the content elements that are extracted by our BERT models. We provide both the original source Chinese texts as well as English translations to provide the reader with a general sense of the source documents and how well our models behave.Cloud Platform\\tAll the capabilities described in this paper come together in an end-to-end cloud-based platform that we have built. The platform has two main features: First, it provides an annotation interface that allows users to define content elements, upload documents, and annotate documents; a screenshot is shown in Figure', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What type of documents are supported by the annotation platform?', context='FIGREF12. We have invested substantial effort in making the interface as easy to use as possible; for example, annotating content elements is as easy as selecting text from the document. Our platform is able to ingest documents in a variety of formats, including PDFs and Microsoft Word, and converts these formats into plain text before presenting them to the annotators.The second feature of the platform is the ability for users to upload new documents and apply inference on them using a fine-tuned BERT model; a screenshot of this feature is shown in Figure FIGREF13. The relevant content elements are highlighted in the document.On the cloud platform, the inference module also applies a few simple rule-based modifications to post-process BERT extraction results. For any of the extracted dates, we further applied a date parser based on rules and regular expressions to normalize and canonicalize the extracted outputs. In the regulatory filings, we tried to normalize numbers that were written in a mixture of Arabic numerals and Chinese units (e.g., “UTF8gbsn亿”, the unit for $10^8$) and discarded partial results if simple rule-based rewrites were not successful. In the property lease agreements, the contract length, if not directly extracted by BERT, is computed from the', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What type of documents are supported by the annotation platform?', context='did not exploit this observation; for example, we made no explicit attempt to induce template structure or apply clustering—although such techniques would likely improve extraction accuracy. In total, we collected and manually annotated 150 filings, which were divided into training, validation, and test sets with a 6:2:2 split. Our test corpus comprises 30 regulatory filings. Table TABREF6 enumerates the seven content elements that we extract.Property Lease Agreements. These contracts mostly follow a fixed “schema” with a certain number of prescribed elements (leaseholder, tenant, rent, deposit, etc.); Table TABREF7 enumerates the eight elements that our model extracts. Since most property lease agreements are confidential, no public corpus for research exists, and thus we had to build our own. To this end, we searched the web for publicly-available templates of property lease agreements and found 115 templates in total. For each template, we manually generated one, two, or three instances, using a fake data generator tool to fill in the missing content elements such as addresses. In total, we created (and annotated) 223 contracts by hand. This corpus was further split into training, validation, and test data with a 6:2:2 split. Our test', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='Is this style generator compared to some baseline?', context='Low-Level Linguistic Controls for Style Transfer and Content Preservation\\tDespite the success of style transfer in image processing, it has seen limited progress in natural language generation. Part of the problem is that content is not as easily decoupled from style in the text domain. Curiously, in the field of stylometry, content does not figure prominently in practical methods of discriminating stylistic elements, such as authorship and genre. Rather, syntax and function words are the most salient features. Drawing on this work, we model style as a suite of low-level linguistic controls, such as frequency of pronouns, prepositions, and subordinate clause constructions. We train a neural encoder-decoder model to reconstruct reference sentences given only content words and the setting of the controls. We perform style transfer by keeping the content words fixed while adjusting the controls to be indicative of another style. In experiments, we show that the model reliably responds to the linguistic controls and perform both automatic and manual evaluations on style transfer. We find we can fool a style classifier 84% of the time, and that our model produces highly diverse and stylistically distinctive outputs. This work introduces a formal, extendable model of style that can add control to any neural text generation system.\\tIntroduction\\tAll text', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Is this style generator compared to some baseline?', context=\"the Baseline has little to no diversity in the outputs.However, the oracle introduces a huge jump in accuracy for the StyleEQ model, especially compared to the Baseline, partially because the diversity of outputs from StyleEQ is much higher; often the Baseline model produces no diversity – the 16 output sentences may be nearly identical, save a single word or two. It's important to note that neither model uses the classifier in any way except to select the sentence from 16 candidate outputs.What this implies is that lurking within the StyleEQ model outputs are great sentences, even if they are hard to find. In many cases, the StyleEQ model has a classification accuracy above the base rate from the test data, which is 75% (see table:classifiers).Human Evaluation\\ttable:cherrypicking shows example outputs for the StyleEQ and Baseline models. Through inspection we see that the StyleEQ model successfully changes syntactic constructions in stylistically distinctive ways, such as increasing syntactic complexity when transferring to philosophy, or changing relevant pronouns when transferring to sci-fi. In contrast, the Baseline model doesn't create outputs that move far from the reference sentence, making only minor modifications such changing the type of a single pronoun.To determine how readers would classify\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Is this style generator compared to some baseline?', context=\"batch size of 64. We also apply dropout with a drop rate of 0.25 to all embedding layers, the GRUs, and preceptron hidden layer. We train for a maximum of 200 epochs, using validation set BLEU score BIBREF26 to select the final model iteration for evaluation.Models ::: Neural Architecture ::: Selecting Controls for Style Transfer\\tIn the Baseline model, style transfer is straightforward: given an input sentence in one style, fix the encoder content features while selecting a different genre embedding. In contrast, the StyleEQ model requires selecting the counts for each control. Although there are a variety of ways to do this, we use a method that encourages a diversity of outputs.In order to ensure the controls match the reference sentence in magnitude, we first find all sentences in the target style with the same number of words as the reference sentence. Then, we add the following constraints: the same number of proper nouns, the same number of nouns, the same number of verbs, and the same number of adjectives. We randomly sample $n$ of the remaining sentences, and for each of these `sibling' sentences, we compute the controls. For each of the new controls, we generate a sentence using the original input sentence content\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Is this style generator compared to some baseline?', context='our transferred sentences, we recruited three English Literature PhD candidates, all of whom had passed qualifying exams that included determining both genre and era of various literary texts.Human Evaluation ::: Fluency Evaluation\\tTo evaluate the fluency of our outputs, we had the annotators score reference sentences, reconstructed sentences, and transferred sentences on a 0-5 scale, where 0 was incoherent and 5 was a well-written human sentence.table:fluency shows the average fluency of various conditions from all three annotators. Both models have fluency scores around 3. Upon inspection of the outputs, it is clear that many have fluency errors, resulting in ungrammatical sentences.Notably the Baseline often has slightly higher fluency scores than the StyleEQ model. This is likely because the Baseline model is far less constrained in how to construct the output sentence, and upon inspection often reconstructs the reference sentence even when performing style transfer. In contrast, the StyleEQ is encouraged to follow the controls, but can struggle to incorporate these controls into a fluent sentence.The fluency of all outputs is lower than desired. We expect that incorporating pre-trained language models would increase the fluency of all outputs without requiring larger datasets.Human Evaluation ::: Human Classification\\tEach', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='Is this style generator compared to some baseline?', context=\"model. In human evaluations, we encounter the `vampires in space' problem in which content and style are equally discriminative but people focus more on the content.In future work we would like to model higher-level syntactic controls. BIBREF20 show that differences in clausal constructions, for instance having a dependent clause before an independent clause or vice versa, is a marker of style appreciated by the reader. Such features would likely interact with our lower-level controls in an interesting way, and provide further insight into style transfer in text.Acknowledgements\\tKaty Gero is supported by an NSF GRF (DGE - 1644869). We would also like to thank Elsbeth Turcan for her helpful comments.\", score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How they perform manual evaluation, what is criteria?', context='our transferred sentences, we recruited three English Literature PhD candidates, all of whom had passed qualifying exams that included determining both genre and era of various literary texts.Human Evaluation ::: Fluency Evaluation\\tTo evaluate the fluency of our outputs, we had the annotators score reference sentences, reconstructed sentences, and transferred sentences on a 0-5 scale, where 0 was incoherent and 5 was a well-written human sentence.table:fluency shows the average fluency of various conditions from all three annotators. Both models have fluency scores around 3. Upon inspection of the outputs, it is clear that many have fluency errors, resulting in ungrammatical sentences.Notably the Baseline often has slightly higher fluency scores than the StyleEQ model. This is likely because the Baseline model is far less constrained in how to construct the output sentence, and upon inspection often reconstructs the reference sentence even when performing style transfer. In contrast, the StyleEQ is encouraged to follow the controls, but can struggle to incorporate these controls into a fluent sentence.The fluency of all outputs is lower than desired. We expect that incorporating pre-trained language models would increase the fluency of all outputs without requiring larger datasets.Human Evaluation ::: Human Classification\\tEach', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How they perform manual evaluation, what is criteria?', context='output.Automatic Evaluations ::: Feature Control\\tDesigning controllable language models is often difficult because of the various dependencies between tokens; when changing one control value it may effect other aspects of the surface realization. For example, increasing the number of conjunctions may effect how the generator places prepositions to compensate for structural changes in the sentence. Since our features are deterministically recoverable, we can perturb an individual control value and check to see that the desired change was realized in the output. Moreover, we can check the amount of change in the other non-perturbed features to measure the independence of the controls.We sample 50 sentences from each genre from the test set. For each sample, we create a perturbed control setting for each control by adding $\\\\delta $ to the original control value. This is done for $\\\\delta \\\\in \\\\lbrace -3, -2, -1, 0, 1, 2, 3\\\\rbrace $, skipping any settings where the new control value would be negative.table:autoeval:ctrl shows the results of this experiment. The Exact column displays the percentage of generated texts that realize the exact number of control features specified by the perturbed control. High percentages in the Exact column indicate greater one-to-one correspondence', score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How they perform manual evaluation, what is criteria?', context='features. The generated sentences are then reranked using the length normalized log-likelihood under the model. We can then select the highest scoring sentence as our style-transferred output, or take the top-$k$ when we need a diverse set of outputs.The reason for this process is that although there are group-level distinctive controls for each style, e.g. the high use of punctuation in philosophy books or of first person pronouns in gothic novels, at the sentence level it can understandably be quite varied. This method matches sentences between styles, capturing the natural distribution of the corpora.Automatic Evaluations ::: BLEU Scores & Perplexity\\tIn tab:blueperpl we report BLEU scores for the reconstruction of test set sentences from their content and feature representations, as well as the model perplexities of the reconstruction. For both models, we use beam decoding with a beam size of eight. Beam candidates are ranked according to their length normalized log-likelihood. On these automatic measures we see that StyleEQ is better able to reconstruct the original sentences. In some sense this evaluation is mostly a sanity check, as the feature controls contain more locally specific information than the genre embeddings, which say very little about how many specific function words one should expect to see in the', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How they perform manual evaluation, what is criteria?', context=\"StyleEq models. For the Baseline, we use a beam search of size 16. For the StyleEQ model, we use the method described in Section SECREF25 to select 16 `sibling' sentences in the target style, and generated a transferred sentence for each. We look at three different methods for selection: all, which uses all output sentences; top, which selects the top ranked sentence based on the score from the model; and oracle, which selects the sentence with the highest classifier likelihood for the intended style.The reason for the third method, which indeed acts as an oracle, is that using the score from the model didn't always surface a transferred sentence that best reflected the desired style. Partially this was because the model score was mostly a function of how well a transferred sentence reflected the distribution of the training data. But additionally, some control settings are more indicative of a target style than others. The use of the classifier allows us to identify the most suitable control setting for a target style that was roughly compatible with the number of content words.In table:fasttext-results we see the results. Note that for both models, the all and top classification accuracy tends to be quite similar, though for the Baseline they are often almost exactly the same when\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How they perform manual evaluation, what is criteria?', context=\"heuristics. In contrast, we believe that monolithic style embeddings don't capture the existing knowledge we have about style, and will struggle to disentangle content.Related Work ::: Controlling Linguistic Features\\tSeveral papers have worked on controlling style when generating sentences from restaurant meaning representations BIBREF11, BIBREF12. In each of these cases, the diversity in outputs is quite small given the constraints of the meaning representation, style is often constrained to interjections (like “yeah”), and there is no original style from which to transfer.BIBREF13 investigate using stylistic parameters and content parameters to control text generation using a movie review dataset. Their stylistic parameters are created using word-level heuristics and they are successful in controlling these parameters in the outputs. Their success bodes well for our related approach in a style transfer setting, in which the content (not merely content parameters) is held fixed.Related Work ::: Stylometry and the Digital Humanities\\tStyle, in literary research, is anything but a stable concept, but it nonetheless has a long tradition of study in the digital humanities. In a remarkably early quantitative study of literature, BIBREF14 charts sentence-level stylistic attributes specific to a number of novelists. Half a century\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What metrics are used for automatic evaluation?', context='our transferred sentences, we recruited three English Literature PhD candidates, all of whom had passed qualifying exams that included determining both genre and era of various literary texts.Human Evaluation ::: Fluency Evaluation\\tTo evaluate the fluency of our outputs, we had the annotators score reference sentences, reconstructed sentences, and transferred sentences on a 0-5 scale, where 0 was incoherent and 5 was a well-written human sentence.table:fluency shows the average fluency of various conditions from all three annotators. Both models have fluency scores around 3. Upon inspection of the outputs, it is clear that many have fluency errors, resulting in ungrammatical sentences.Notably the Baseline often has slightly higher fluency scores than the StyleEQ model. This is likely because the Baseline model is far less constrained in how to construct the output sentence, and upon inspection often reconstructs the reference sentence even when performing style transfer. In contrast, the StyleEQ is encouraged to follow the controls, but can struggle to incorporate these controls into a fluent sentence.The fluency of all outputs is lower than desired. We expect that incorporating pre-trained language models would increase the fluency of all outputs without requiring larger datasets.Human Evaluation ::: Human Classification\\tEach', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What metrics are used for automatic evaluation?', context='features. The generated sentences are then reranked using the length normalized log-likelihood under the model. We can then select the highest scoring sentence as our style-transferred output, or take the top-$k$ when we need a diverse set of outputs.The reason for this process is that although there are group-level distinctive controls for each style, e.g. the high use of punctuation in philosophy books or of first person pronouns in gothic novels, at the sentence level it can understandably be quite varied. This method matches sentences between styles, capturing the natural distribution of the corpora.Automatic Evaluations ::: BLEU Scores & Perplexity\\tIn tab:blueperpl we report BLEU scores for the reconstruction of test set sentences from their content and feature representations, as well as the model perplexities of the reconstruction. For both models, we use beam decoding with a beam size of eight. Beam candidates are ranked according to their length normalized log-likelihood. On these automatic measures we see that StyleEQ is better able to reconstruct the original sentences. In some sense this evaluation is mostly a sanity check, as the feature controls contain more locally specific information than the genre embeddings, which say very little about how many specific function words one should expect to see in the', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What metrics are used for automatic evaluation?', context='output.Automatic Evaluations ::: Feature Control\\tDesigning controllable language models is often difficult because of the various dependencies between tokens; when changing one control value it may effect other aspects of the surface realization. For example, increasing the number of conjunctions may effect how the generator places prepositions to compensate for structural changes in the sentence. Since our features are deterministically recoverable, we can perturb an individual control value and check to see that the desired change was realized in the output. Moreover, we can check the amount of change in the other non-perturbed features to measure the independence of the controls.We sample 50 sentences from each genre from the test set. For each sample, we create a perturbed control setting for each control by adding $\\\\delta $ to the original control value. This is done for $\\\\delta \\\\in \\\\lbrace -3, -2, -1, 0, 1, 2, 3\\\\rbrace $, skipping any settings where the new control value would be negative.table:autoeval:ctrl shows the results of this experiment. The Exact column displays the percentage of generated texts that realize the exact number of control features specified by the perturbed control. High percentages in the Exact column indicate greater one-to-one correspondence', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What metrics are used for automatic evaluation?', context='annotator annotated 90 reference sentences (i.e. from the training corpus) with which style they thought the sentence was from. The accuracy on this baseline task for annotators A1, A2, and A3 was 80%, 88%, and 80% respectively, giving us an upper expected bound on the human evaluation.In discussing this task with the annotators, they noted that content is a heavy predictor of genre, and that would certainly confound their annotations. To attempt to mitigate this, we gave them two annotation tasks: which-of-3 where they simply marked which style they thought a sentence was from, and which-of-2 where they were given the original style and marked which style they thought the sentence was transferred into.For each task, each annotator marked 180 sentences: 90 from each model, with an even split across the three genres. Annotators were presented the sentences in a random order, without information about the models. In total, each marked 270 sentences. (Note there were no reconstructions in this annotation task.)table:humanclassifiers shows the results. In both tasks, accuracy of annotators classifying the sentence as its intended style was low. In which-of-3, scores were around', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='What metrics are used for automatic evaluation?', context=\"heuristics. In contrast, we believe that monolithic style embeddings don't capture the existing knowledge we have about style, and will struggle to disentangle content.Related Work ::: Controlling Linguistic Features\\tSeveral papers have worked on controlling style when generating sentences from restaurant meaning representations BIBREF11, BIBREF12. In each of these cases, the diversity in outputs is quite small given the constraints of the meaning representation, style is often constrained to interjections (like “yeah”), and there is no original style from which to transfer.BIBREF13 investigate using stylistic parameters and content parameters to control text generation using a movie review dataset. Their stylistic parameters are created using word-level heuristics and they are successful in controlling these parameters in the outputs. Their success bodes well for our related approach in a style transfer setting, in which the content (not merely content parameters) is held fixed.Related Work ::: Stylometry and the Digital Humanities\\tStyle, in literary research, is anything but a stable concept, but it nonetheless has a long tradition of study in the digital humanities. In a remarkably early quantitative study of literature, BIBREF14 charts sentence-level stylistic attributes specific to a number of novelists. Half a century\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How they know what are content words?', context='content, are used as input into the model, along with their POS tags and lemmas.In this way we encourage models to construct a sentence using content and style independently. This will allow us to vary the stylistic controls while keeping the content constant, and successfully perform style transfer. When generating a new sentence, the controls correspond to the counts of the corresponding syntactic features that we expect to be realized in the output.Models ::: Neural Architecture\\tWe implement our feature controlled language model using a neural encoder-decoder with attention BIBREF22, using 2-layer uni-directional gated recurrent units (GRUs) for the encoder and decoder BIBREF23.The input to the encoder is a sequence of $M$ content words, along with their lemmas, and fine and coarse grained part-of-speech (POS) tags, i.e. $X_{.,j} = (x_{1,j},\\\\ldots ,x_{M,j})$ for $j \\\\in \\\\mathcal {T} = \\\\lbrace \\\\textrm {word, lemma, fine-pos, coarse-pos}\\\\rbrace $. We embed each token (and its lemma and POS) before concatenating, and feeding into the encoder GRU to obtain encoder hidden states, $', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How they know what are content words?', context=\"found in Table TABREF12.In order to validate the above claims, we train five different classifiers to predict the literary style of sentences from our corpus. Each classifier has gradually more content words replaced with part-of-speech (POS) tag placeholder tokens. The All model is trained on sentences with all proper nouns replaced by `PROPN'. The models Ablated N, Ablated NV, and Ablated NVA replace nouns, nouns & verbs, and nouns, verbs, & adjectives with the corresponding POS tag respectively. Finally, Content-only is trained on sentences with all words that are not tagged as NOUN, VERB, ADJ removed; the remaining words are not ablated.We train the classifiers on the training set, balancing the class distribution to make sure there are the same number of sentences from each style. Classifiers are trained using fastText BIBREF19, using tri-gram features with all other settings as default. table:classifiers shows the accuracies of the classifiers.The styles are highly distinctive: the All classifier has an accuracy of 86%. Additionally, even the Ablated NVA is quite successful, with 75% accuracy, even without access to any content words. The Content only classifier is also quite successful, at\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How they know what are content words?', context='counts of function words, to discriminate between literary styles. Stylometry first saw success in attributing authorship to the disputed Federalist Papers BIBREF2, but is recently used by scholars to study things such as the birth of genres BIBREF3 and the change of author styles over time BIBREF4. The use of function words is likely not the way writers intend to express style, but they appear to be downstream realizations of higher-level stylistic decisions.We hypothesize that surface-level linguistic features, such as counts of personal pronouns, prepositions, and punctuation, are an excellent definition of literary style, as borne out by their use in the digital humanities, and our own style classification experiments. We propose a controllable neural encoder-decoder model in which these features are modelled explicitly as decoder feature embeddings. In training, the model learns to reconstruct a text using only the content words and the linguistic feature embeddings. We can then transfer arbitrary content words to a new style without parallel data by setting the low-level style feature embeddings to be indicative of the target style.This paper makes the following contributions:A formal model of style as a suite of controllable, low-level linguistic features that are independent of content.An automatic evaluation showing that our model fools a style', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How they know what are content words?', context=\"heuristics. In contrast, we believe that monolithic style embeddings don't capture the existing knowledge we have about style, and will struggle to disentangle content.Related Work ::: Controlling Linguistic Features\\tSeveral papers have worked on controlling style when generating sentences from restaurant meaning representations BIBREF11, BIBREF12. In each of these cases, the diversity in outputs is quite small given the constraints of the meaning representation, style is often constrained to interjections (like “yeah”), and there is no original style from which to transfer.BIBREF13 investigate using stylistic parameters and content parameters to control text generation using a movie review dataset. Their stylistic parameters are created using word-level heuristics and they are successful in controlling these parameters in the outputs. Their success bodes well for our related approach in a style transfer setting, in which the content (not merely content parameters) is held fixed.Related Work ::: Stylometry and the Digital Humanities\\tStyle, in literary research, is anything but a stable concept, but it nonetheless has a long tradition of study in the digital humanities. In a remarkably early quantitative study of literature, BIBREF14 charts sentence-level stylistic attributes specific to a number of novelists. Half a century\", score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How they know what are content words?', context='of literary styles, these constructions often co-occur with distinctive content.Stylometrics finds syntactic constructions are great at fingerprinting, but suggests that these constructions are surface realizations of higher-level stylistic decisions. The number and type of personal pronouns is a reflection of how characters feature in a text. A large number of positional prepositions may be the result of a writer focusing on physical descriptions of scenes. In our attempt to decouple these, we create Frankenstein sentences, which piece together features of different styles – we are putting vampires in space.Another way to validate our approach would be to select data that is stylistically distinctive but with similar content: perhaps genres in which content is static but language use changes over time, stylistically distinct authors within a single genre, or parodies of a distinctive genre.Conclusion and Future Work\\tWe present a formal, extendable model of style that can add control to any neural text generation system. We model style as a suite of low-level linguistic controls, and train a neural encoder-decoder model to reconstruct reference sentences given only content words and the setting of the controls. In automatic evaluations, we show that our model can fool a style classifier 84% of the time and outperforms a baseline genre-embedding', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How they model style as a suite of low-level linguistic controls, such as frequency of pronouns, prepositions, and subordinate clause constructions?', context='Low-Level Linguistic Controls for Style Transfer and Content Preservation\\tDespite the success of style transfer in image processing, it has seen limited progress in natural language generation. Part of the problem is that content is not as easily decoupled from style in the text domain. Curiously, in the field of stylometry, content does not figure prominently in practical methods of discriminating stylistic elements, such as authorship and genre. Rather, syntax and function words are the most salient features. Drawing on this work, we model style as a suite of low-level linguistic controls, such as frequency of pronouns, prepositions, and subordinate clause constructions. We train a neural encoder-decoder model to reconstruct reference sentences given only content words and the setting of the controls. We perform style transfer by keeping the content words fixed while adjusting the controls to be indicative of another style. In experiments, we show that the model reliably responds to the linguistic controls and perform both automatic and manual evaluations on style transfer. We find we can fool a style classifier 84% of the time, and that our model produces highly diverse and stylistically distinctive outputs. This work introduces a formal, extendable model of style that can add control to any neural text generation system.\\tIntroduction\\tAll text', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How they model style as a suite of low-level linguistic controls, such as frequency of pronouns, prepositions, and subordinate clause constructions?', context='of literary styles, these constructions often co-occur with distinctive content.Stylometrics finds syntactic constructions are great at fingerprinting, but suggests that these constructions are surface realizations of higher-level stylistic decisions. The number and type of personal pronouns is a reflection of how characters feature in a text. A large number of positional prepositions may be the result of a writer focusing on physical descriptions of scenes. In our attempt to decouple these, we create Frankenstein sentences, which piece together features of different styles – we are putting vampires in space.Another way to validate our approach would be to select data that is stylistically distinctive but with similar content: perhaps genres in which content is static but language use changes over time, stylistically distinct authors within a single genre, or parodies of a distinctive genre.Conclusion and Future Work\\tWe present a formal, extendable model of style that can add control to any neural text generation system. We model style as a suite of low-level linguistic controls, and train a neural encoder-decoder model to reconstruct reference sentences given only content words and the setting of the controls. In automatic evaluations, we show that our model can fool a style classifier 84% of the time and outperforms a baseline genre-embedding', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How they model style as a suite of low-level linguistic controls, such as frequency of pronouns, prepositions, and subordinate clause constructions?', context=\"model. In human evaluations, we encounter the `vampires in space' problem in which content and style are equally discriminative but people focus more on the content.In future work we would like to model higher-level syntactic controls. BIBREF20 show that differences in clausal constructions, for instance having a dependent clause before an independent clause or vice versa, is a marker of style appreciated by the reader. Such features would likely interact with our lower-level controls in an interesting way, and provide further insight into style transfer in text.Acknowledgements\\tKaty Gero is supported by an NSF GRF (DGE - 1644869). We would also like to thank Elsbeth Turcan for her helpful comments.\", score=0),\n",
       " CrossEncoderFinetuningDatasetSample(query='How they model style as a suite of low-level linguistic controls, such as frequency of pronouns, prepositions, and subordinate clause constructions?', context='80% accuracy. This indicates that these stylistic genres are distinctive at both the content level and at the syntactic level.Models ::: Formal Model of Style\\tGiven that non-content words are distinctive enough for a classifier to determine style, we propose a suite of low-level linguistic feature counts (henceforth, controls) as our formal, content-blind definition of style. The style of a sentence is represented as a vector of counts of closed word classes (like personal pronouns) as well as counts of syntactic features like the number of SBAR non-terminals in its constituency parse, since clause structure has been shown to be indicative of style BIBREF20. Controls are extracted heuristically, and almost all rely on counts of pre-defined word lists. For constituency parses we use the Stanford Parser BIBREF21. table:controlexamples lists all the controls along with examples.Models ::: Formal Model of Style ::: Reconstruction Task\\tModels are trained with a reconstruction task, in which a distorted version of a reference sentence is input and the goal is to output the original reference.fig:sentenceinput illustrates the process. Controls are calculated heuristically. All words found in the control word lists are then removed from the reference sentence. The remaining words, which represent the', score=1),\n",
       " CrossEncoderFinetuningDatasetSample(query='How they model style as a suite of low-level linguistic controls, such as frequency of pronouns, prepositions, and subordinate clause constructions?', context='counts of function words, to discriminate between literary styles. Stylometry first saw success in attributing authorship to the disputed Federalist Papers BIBREF2, but is recently used by scholars to study things such as the birth of genres BIBREF3 and the change of author styles over time BIBREF4. The use of function words is likely not the way writers intend to express style, but they appear to be downstream realizations of higher-level stylistic decisions.We hypothesize that surface-level linguistic features, such as counts of personal pronouns, prepositions, and punctuation, are an excellent definition of literary style, as borne out by their use in the digital humanities, and our own style classification experiments. We propose a controllable neural encoder-decoder model in which these features are modelled explicitly as decoder feature embeddings. In training, the model learns to reconstruct a text using only the content words and the linguistic feature embeddings. We can then transfer arbitrary content words to a new style without parallel data by setting the low-level style feature embeddings to be indicative of the target style.This paper makes the following contributions:A formal model of style as a suite of controllable, low-level linguistic features that are independent of content.An automatic evaluation showing that our model fools a style', score=1)]"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 18
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T12:28:18.383658Z",
     "start_time": "2025-09-17T12:28:18.358662Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# Total number of query-context pairs in the final cross-encoder fine-tuning dataset\n",
     "# (bare last-line expression -> rendered via rich display rather than print)\n",
     "len(final_finetuning_data_list)"
   ],
   "id": "f30c1ad8d568dd5f",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "175"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 19
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T12:28:56.720590Z",
     "start_time": "2025-09-17T12:28:56.689590Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Save final fine-tuning dataset\n",
    "import pandas as pd\n",
    "\n",
    "df_finetuning_dataset = pd.DataFrame(final_finetuning_data_list)\n",
    "df_finetuning_dataset.to_csv(\"fine_tuning.csv\")\n",
    "\n",
    "# The finetuning dataset can be found at the below dropbox link:-\n",
    "# https://www.dropbox.com/scl/fi/zu6vtisp1j3wg2hbje5xv/fine_tuning.csv?rlkey=0jr6fud8sqk342agfjbzvwr9x&dl=0"
   ],
   "id": "175cd68fad8672b6",
   "outputs": [],
   "execution_count": 20
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": "",
   "id": "9d85b7775bc6785e"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T12:29:08.872801Z",
     "start_time": "2025-09-17T12:29:08.860798Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# Reuse the in-memory fine-tuning dataset built above.\n",
     "# NOTE(review): despite the original \"Load\" label, nothing is read from disk here —\n",
     "# this only aliases the existing list, so a fresh-kernel Restart-&-Run-All would\n",
     "# need to read fine_tuning.csv instead. Confirm intended provenance.\n",
     "\n",
     "finetuning_dataset = final_finetuning_data_list"
   ],
   "id": "edaa5c9ad8e4d807",
   "outputs": [],
   "execution_count": 21
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T12:41:32.241302Z",
     "start_time": "2025-09-17T12:41:10.731490Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Download the held-out test split from Dropbox.\n",
    "# -L follows Dropbox's redirect chain; -f makes curl exit non-zero on an\n",
    "# HTTP error instead of silently saving an error page as test.csv.\n",
    "# NOTE(review): this cell requires network access -- it previously failed\n",
    "# with a connection timeout; skip it when running offline.\n",
    "!curl -L -f -o test.csv \"https://www.dropbox.com/scl/fi/3lmzn6714oy358mq0vawm/test.csv?rlkey=yz16080te4van7fvnksi9kaed&dl=1\""
   ],
   "id": "db516b7ce0d73b42",
   "outputs": [],
   "execution_count": 27
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T14:14:15.390383Z",
     "start_time": "2025-09-17T14:13:29.460176Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Fine-tune a cross-encoder re-ranker on the generated dataset.\n",
    "# The engine class must be imported explicitly -- the original cell only\n",
    "# imported SentenceTransformer (unused) and crashed with\n",
    "# NameError: CrossEncoderFinetuneEngine is not defined.\n",
    "from llama_index.finetuning.cross_encoders import CrossEncoderFinetuneEngine\n",
    "\n",
    "# Initialise the cross-encoder fine-tuning engine\n",
    "finetuning_engine = CrossEncoderFinetuneEngine(\n",
    "    dataset=finetuning_dataset, epochs=2, batch_size=8\n",
    ")\n",
    "\n",
    "# Finetune the cross-encoder model\n",
    "finetuning_engine.finetune()"
   ],
   "id": "e5eda3c08fcc3b0",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": "",
   "id": "5cf2c4c61af49ab7"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
