{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T05:59:24.692440Z",
     "start_time": "2025-01-14T05:59:24.688353Z"
    }
   },
   "outputs": [],
   "source": [
    "from prompt import ROUGH_OUTLINE_PROMPT, SUBSECTION_OUTLINE_PROMPT, EDIT_FINAL_OUTLINE_PROMPT, MERGING_OUTLINE_PROMPT, \\\n",
    "    EDIT_FINAL_OUTLINE_PROMPT2, SUBSECTION_WRITING_PROMPT, LCE_PROMPT\n",
    "from zhipuai import ZhipuAI\n",
    "from rag import *\n",
    "\n",
    "api_key = \"29a6e4a2ee21cc38d721fc63a135b6a5.s4l9JvW1FksKq0xo\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:06:50.386032Z",
     "start_time": "2025-01-14T02:06:50.383349Z"
    }
   },
   "outputs": [],
   "source": [
    "def __generate_prompt(template, paras):\n",
    "    prompt = template\n",
    "    for k in paras.keys():\n",
    "        prompt = prompt.replace(f'[{k}]', paras[k])\n",
    "    return prompt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:06:50.459604Z",
     "start_time": "2025-01-14T02:06:50.456083Z"
    }
   },
   "outputs": [],
   "source": [
    "def extract_title_sections_descriptions(outline):\n",
    "    title = outline.split('Title: ')[1].split('\\n')[0]\n",
    "    sections, descriptions = [], []\n",
    "    for i in range(100):\n",
    "        if f'Section {i + 1}' in outline:\n",
    "            sections.append(outline.split(f'Section {i + 1}: ')[1].split('\\n')[0])\n",
    "            descriptions.append(outline.split(f'Description {i + 1}: ')[1].split('\\n')[0])\n",
    "    return title, sections, descriptions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:06:50.489419Z",
     "start_time": "2025-01-14T02:06:50.485854Z"
    }
   },
   "outputs": [],
   "source": [
    "def extract_subsections_subdescriptions(outline):\n",
    "    subsections, subdescriptions = [], []\n",
    "    for i in range(100):\n",
    "        if f'Subsection {i + 1}' in outline:\n",
    "            subsections.append(outline.split(f'Subsection {i + 1}: ')[1].split('\\n')[0])\n",
    "            subdescriptions.append(outline.split(f'Description {i + 1}: ')[1].split('\\n')[0])\n",
    "    return subsections, subdescriptions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:06:50.504360Z",
     "start_time": "2025-01-14T02:06:50.500423Z"
    }
   },
   "outputs": [],
   "source": [
    "def process_outlines(section_outline, sub_outlines):\n",
    "    res = ''\n",
    "    survey_title, survey_sections, survey_section_descriptions = extract_title_sections_descriptions(\n",
    "        outline=section_outline)\n",
    "    res += f'# {survey_title}\\n\\n'\n",
    "    for i in range(len(survey_sections)):\n",
    "        section = survey_sections[i]\n",
    "        res += f'## {i + 1} {section}\\nDescription: {survey_section_descriptions[i]}\\n\\n'\n",
    "        subsections, subsection_descriptions = extract_subsections_subdescriptions(sub_outlines[i])\n",
    "        for j in range(len(subsections)):\n",
    "            subsection = subsections[j]\n",
    "            res += f'### {i + 1}.{j + 1} {subsection}\\nDescription: {subsection_descriptions[j]}\\n\\n'\n",
    "    return res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:06:50.515574Z",
     "start_time": "2025-01-14T02:06:50.512207Z"
    }
   },
   "outputs": [],
   "source": [
    "def remove_descriptions(text):\n",
    "    \"\"\"\n",
    "    移除文本中所有以 \"Description\" 开头的行。\n",
    "\n",
    "    Args:\n",
    "        text (str): 包含多行文本的字符串。\n",
    "\n",
    "    Returns:\n",
    "        str: 移除以 \"Description\" 开头的行后的文本。\n",
    "    \"\"\"\n",
    "    lines = text.split('\\n')\n",
    "\n",
    "    filtered_lines = [line for line in lines if not line.strip().startswith(\"Description\")]\n",
    "\n",
    "    result = '\\n'.join(filtered_lines)\n",
    "\n",
    "    return result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:06:50.671816Z",
     "start_time": "2025-01-14T02:06:50.667216Z"
    }
   },
   "outputs": [],
   "source": [
    "def consolidate_rag_result(rag_result, kind=\"all\"):\n",
    "    # 初始化变量\n",
    "    related_paper_list = []  # 用于存储最终的字符串列表\n",
    "    cur_content = \"\"  # 当前正在构建的字符串\n",
    "\n",
    "    # 遍历 paper_id2chunks_list\n",
    "    for paper_id2chunks in rag_result:\n",
    "        # 将 paper_id2chunks 中的内容拼接成一个字符串\n",
    "        if kind == \"simple\":\n",
    "            content = (\n",
    "                f\"paper_title: {paper_id2chunks['paper_title']}\\n\"\n",
    "                f\"paper_metainfo: {paper_id2chunks['original_filename']}\\n\"\n",
    "                f\"chunk_id: {paper_id2chunks['chunk_id']}\\n\"\n",
    "                f\"{paper_id2chunks['chunk']}\\n\"\n",
    "            )\n",
    "        elif kind == \"abstract\":\n",
    "            content = (\n",
    "                f\"paper_title: {paper_id2chunks['paper_title']}\\n\"\n",
    "                f\"{paper_id2chunks['abstract']}\\n\"\n",
    "            )\n",
    "        elif kind == \"introduction\":\n",
    "            content = (\n",
    "                f\"paper_title: {paper_id2chunks['paper_title']}\\n\"\n",
    "                f\"{paper_id2chunks['introduction']}\\n\"\n",
    "            )\n",
    "        elif kind == \"related_works\":\n",
    "            content = (\n",
    "                f\"paper_title: {paper_id2chunks['paper_title']}\\n\"\n",
    "                f\"{paper_id2chunks['related_works']}\\n\"\n",
    "            )\n",
    "        else:\n",
    "            content = (\n",
    "                f\"paper_title: {paper_id2chunks['paper_title']}\\n\"\n",
    "                f\"{paper_id2chunks['abstract']}\\n\"\n",
    "                f\"{paper_id2chunks['introduction']}\\n\"\n",
    "                f\"{paper_id2chunks['related_works']}\\n\"\n",
    "            )\n",
    "\n",
    "        # 如果当前字符串加上新内容后长度超过 100,000，则将当前字符串添加到 strings 列表中，并开始一个新的字符串\n",
    "        if len(cur_content) + len(content) > 100000:\n",
    "            related_paper_list.append(cur_content)\n",
    "            cur_content = content\n",
    "        else:\n",
    "            cur_content += content\n",
    "\n",
    "    # 将最后一个字符串添加到 strings 列表中\n",
    "    if cur_content:\n",
    "        related_paper_list.append(cur_content)\n",
    "\n",
    "    # total_chars = sum(len(s) for s in related_paper_list)\n",
    "    # 输出结果\n",
    "    # for i, string in enumerate(related_paper_list):\n",
    "    #     print(f\"String {i + 1} (length: {len(string)})\")\n",
    "    return related_paper_list\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:06:51.292940Z",
     "start_time": "2025-01-14T02:06:51.289382Z"
    }
   },
   "outputs": [],
   "source": [
    "def do_rag(keyword):\n",
    "    paper_ids_set = set()\n",
    "    result = search_papers(query=keyword, top_k=30)\n",
    "    for i in range(len(result)):\n",
    "        paper_ids_set.add(result[i]['entity']['paper_id'])\n",
    "    result = query_by_title_contain(title=keyword, top_k=100)\n",
    "    for i in range(len(result)):\n",
    "        paper_ids_set.add(result[i]['paper_id'])\n",
    "    result = query_by_chunk_contain(chunk=keyword, top_k=100)\n",
    "    for i in range(len(result)):\n",
    "        paper_ids_set.add(result[i]['paper_id'])\n",
    "    paper_id2chunks_list = search_chunks_by_paper_id(list(paper_ids_set))\n",
    "    return paper_id2chunks_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:06:51.935102Z",
     "start_time": "2025-01-14T02:06:51.931223Z"
    }
   },
   "outputs": [],
   "source": [
    "def do_rag_simple(keyword, top_k=30):\n",
    "    paper_id2chunks_list = []\n",
    "    result = search_papers(query=keyword, top_k=top_k)\n",
    "    for paper in result:\n",
    "        paper_id, paper_title, chunk_id, chunk = paper['id'], paper['entity']['paper_title'], paper['entity'][\n",
    "            'chunk_id'], paper['entity']['chunk_text']\n",
    "        paper_id2chunks = {\n",
    "            'original_filename': paper['entity']['original_filename'],\n",
    "            'paper_id': paper_id,\n",
    "            'paper_title': paper_title,\n",
    "            'chunk_id': chunk_id,\n",
    "            'chunk': chunk,\n",
    "        }\n",
    "        paper_id2chunks_list.append(paper_id2chunks)\n",
    "    return paper_id2chunks_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:06:52.506292Z",
     "start_time": "2025-01-14T02:06:52.497371Z"
    }
   },
   "outputs": [],
   "source": [
    "def zhipu_api(prompt, model='glm-4-plus'):\n",
    "    client = ZhipuAI(api_key=api_key)\n",
    "    response = client.chat.completions.create(\n",
    "        model=model,  # 请填写您要调用的模型名称\n",
    "        messages=[\n",
    "            {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
    "            # {\"role\": \"user\", \"content\": \"以严肃，准确的写作方式，帮我写一段关于损失函数的调研综述，要求如下：\\n1.主题结构清晰，章节标题契合\\n2.前后逻辑关系或脉络关系通顺\\n3.通过融合综述方法实现详细阐述技术或方法，而非简单的笼统总结\\n4.内容长度不低于2000字\"},\n",
    "            # {\"role\": \"user\", \"content\": \"In a serious and precise writing style, generate an outline for a survey on Aspect Based Sentiment Analysis\"},\n",
    "            {\"role\": \"user\", \"content\": prompt},\n",
    "        ],\n",
    "        stream=False,\n",
    "    )\n",
    "    # print(response.choices[0].message.content)\n",
    "    return response.choices[0].message.content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'Certainly! Below is an outline for a paper or presentation on Aspect-Based Sentiment Analysis (ABSA):\\n\\n### Outline: Aspect-Based Sentiment Analysis (ABSA)\\n\\n#### I. Introduction\\n   A. Definition of Sentiment Analysis\\n   B. Overview of Aspect-Based Sentiment Analysis\\n   C. Importance and Applications of ABSA\\n   D. Objectives of the Paper/Presentation\\n\\n#### II. Background and Context\\n   A. Evolution of Sentiment Analysis\\n   B. Traditional Sentiment Analysis vs. Aspect-Based Sentiment Analysis\\n   C. Key Concepts and Terminology\\n      1. Aspect\\n      2. Opinion\\n      3. Sentiment Polarity\\n      4. Contextual Understanding\\n\\n#### III. Methodology\\n   A. Data Collection\\n      1. Sources of Data (e.g., reviews, social media, surveys)\\n      2. Data Preprocessing Techniques\\n   B. Aspect Extraction\\n      1. Rule-Based Methods\\n      2. Supervised Learning Approaches\\n      3. Unsupervised and Semi-Supervised Learning\\n      4. Deep Learning Techniques\\n   C. Sentiment Analysis\\n      1. Lexicon-Based Methods\\n      2. Machine Learning Models\\n      3. Deep Learning Models (e.g., RNN, LSTM, BERT)\\n   D. Aspect-Sentiment Pairing\\n      1. Co-Reference Resolution\\n      2. Dependency Parsing\\n\\n#### IV. Challenges and Solutions\\n   A. Data Quality and Quantity\\n   B. Handling Ambiguity and Contextual Variability\\n   C. Domain-Specific Challenges\\n   D. Multilingual and Cross-Domain Analysis\\n   E. Real-Time Analysis Constraints\\n\\n#### V. Applications of ABSA\\n   A. Business and Marketing\\n      1. Customer Feedback Analysis\\n      2. Product Improvement\\n   B. Healthcare\\n      1. Patient Experience Analysis\\n      2. Drug Reviews\\n   C. Social Media Monitoring\\n   D. Political Analysis\\n   E. Financial Markets\\n\\n#### VI. Case Studies and Examples\\n   A. Successful Implementations of ABSA\\n   B. Comparative Analysis of Different Models\\n   C. Real-World Impact and Outcomes\\n\\n#### VII. 
Future Directions\\n   A. Advancements in AI and NLP\\n   B. Integration with Other Technologies (e.g., IoT, Big Data)\\n   C. Ethical Considerations and Privacy Concerns\\n   D. Potential New Applications\\n\\n#### VIII. Conclusion\\n   A. Summary of Key Points\\n   B. Reiteration of ABSA’s Significance\\n   C. Final Thoughts and Recommendations\\n\\n#### IX. References\\n   A. Academic Papers\\n   B. Online Resources\\n   C. Books and Journals\\n\\n#### X. Appendices (if applicable)\\n   A. Data Samples\\n   B. Code Snippets\\n   C. Additional Graphs and Tables\\n\\nThis outline provides a structured approach to discussing Aspect-Based Sentiment Analysis, covering its background, methodology, challenges, applications, and future prospects. Each section can be expanded with detailed content, examples, and references to create a comprehensive paper or presentation.'"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "zhipu_api(\"You need to draft a outline about Aspest Based Sentiment Analysis\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:07:03.656374Z",
     "start_time": "2025-01-14T02:06:53.774580Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "17\n"
     ]
    }
   ],
   "source": [
    "topic = 'Aspect Based Sentiment Analysis'\n",
    "paper_id2chunks_list = do_rag(topic)\n",
    "print(len(paper_id2chunks_list))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[{'paper_id': '61e0e9d05244ab9dcb28ca74',\n",
       "  'paper_title': 'Knowledge Graph Augmented Network Towards Multiview Representation Learning for Aspect-based Sentiment Analysis',\n",
       "  'abstract': 'Abstract\\n (difficult) concepts. For rare difficult words, we could comprehend their meanings via their relevant normal words. Inspired by this phenomenon, we employ WordNet as prior knowledge for sentence understanding. For example, the “Tinca” is the subordinate of the “fish genus”, which can be directly related to aspects such as “fish” or “food”, thus alleviating the difficulty of comprehending the sentence.  \\n\\nDifferent from Zhou et al. [17], which directly employs the graph-structure data of the knowledge base, we introduce a simple and efficient strategy to process the knowledge graphs. specifically, semantic matching approaches (see the analysis of different approaches in Sec. 4.3.3) for the task of knowledge graph embedding (KGE) [49] are used to model the semantic relations of knowledge graphs into distributed representations, i.e. , learned knowledge embeddings. In practice, given the graph data in the form of “entity-relation-entity” triples, we train the entity embeddings using an open KGE toolkit, OpenKE 3 [50]. Subsequently, we use the trained knowledge embeddings to initialize a new embedding matrix and then represent the words of $S$ and $T$ with the knowledge embedding matrix. The mapped knowledge embeddings are then concatenated with the hidden state vectors $H_{s}{}^{4}$ . To establish the connection of $S$ and $T$ in knowledge embedding space, we further employ a soft attention mechanism to calculate the semantic relatedness of each word in $S$ and $T$ and capture the most important semantic features as aspectspecific knowledge representations, denoted as $R_{k}$ . For better understanding, taking the sentence “Try the local food , especially the decent Tinca.” and the aspect word “food” as an example, the process of the knowledge branch is illustrated in Fig. 4. 
Notably, since the context word “Tinca” is the subordinate of the aspect “food” and they are also adjacent to each other in the knowledge embedding space, KGAN could easily capture their relatedness and make the correct prediction.  \\n\\nNotably, it also should be noted that the training of these branches is not independent. Specifically, given an input sentencethe same embedding matrix to convert the aspect pair $\\\\{S,\\\\,T\\\\}$ , the contextual and synta Sand c bra Tinto the hes apply corresponding word embeddings, while the knowledge branch uses another knowledge embedding matrix to map the input entities to knowledge embeddings. As a result, through the parallel processing of three branches, KGAN can capture the aspectspecific information from multiple perspectives simultaneously.\\n\\n# 3.4 Hierarchical Fusion Module\\nSince the above representations different views, directly fusing them may scarcely take advantage $\\\\{R_{c},R_{s},R_{k}\\\\}$ are obtained from of their complementarity. To this end, we adopt a hierarchical fusion module to synergistically fuse these representations in a local-to-global manner, which could effectively boost the performance. An illustration of this fusion module is shown in Fig. 5. For ease of illustration, we employ the “input” to represent the procedures of multiple branches.  \\n\\nIn the local fusion procedure, we first concatenate two of the three feature representations in rows, i.e. ,$[R_{c};\\\\;R_{s}],\\\\;[R_{c};\\\\;R_{k}]$ and $[R_{s};\\\\,R_{k}]$ , where “;” denotes vector concatenation operator. The fused representations are fed into three separate fully connected layers to obtain the predicted sentiment features, denoted as $R_{c s}$ ,$R_{c k}$ and $R_{s k}$ . It is noteworthy that we do not share the parameters of these fully connected layers. Subsequently, to make full use of the complementarity between multiple sentiment features, we further fuse them at the global level. 
Specifically, $\\\\left[R_{c s},R_{c k},R_{s k}\\\\right]^{T}$ the obtained sentiment features are concatenated in columns, \\', and we feed them into a $3{^\\\\ast}3$ convolution layer i.e. ,to selectively incorporate these features.  \\n\\nThrough the above local and global fusion procedures, we can make the feature representations benefit from each other step by step. In this way, external knowledge could be better integrated with contextual and syntactic information, thus achieving more promising performance.\\n\\n# 3. https://github.com/thunlp/OpenKE.\\n4. Such a process can not only fuse the heterogeneous features (text and graph), but also alleviate the negative effect of sparsity and inaccuracy of knowledge embeddings.  \\n\\n  \\nFig. 5. Illustration of the hierarchical fusion module.  \\n\\nTABLE 1 Statistics of evaluated aspect-level datasets.   \\n\\n\\n<html><body><table><tr><td>Datasets</td><td>Division</td><td>#Positive</td><td>#Negative</td><td>#Neutral</td></tr><tr><td rowspan=\"2\">Laptop14</td><td>Train</td><td>980</td><td>858</td><td>454</td></tr><tr><td>Test</td><td>340</td><td>128</td><td>171</td></tr><tr><td rowspan=\"2\">Restaurant14</td><td>Train</td><td>2159</td><td>800</td><td>632</td></tr><tr><td>Test</td><td>730</td><td>195</td><td>196</td></tr><tr><td rowspan=\"2\">Twitter</td><td>Train</td><td>1567</td><td>1563</td><td>3127</td></tr><tr><td>Test</td><td>174</td><td>174</td><td>346</td></tr><tr><td rowspan=\"2\">Restaurant15</td><td>Train</td><td>912</td><td>256</td><td>36</td></tr><tr><td>Test</td><td>326</td><td>182</td><td>34</td></tr><tr><td rowspan=\"2\">Restaurant16</td><td>Train</td><td>1240</td><td>439</td><td>69</td></tr><tr><td>Test</td><td>469</td><td>117</td><td>30</td></tr></table></body></html>  \\n\\nLast, we cast the output of the convolution layer as the final sentiment prediction, namely, $p$ , and employ the following crossentropy loss function to guide the optimization and training.  
\\n\\n$$\\n\\\\mathcal{L}=-\\\\sum_{i}\\\\sum_{j}y_{i}^{j}\\\\log(p_{i}^{j}),\\n$$  \\n\\nwhere $i$ indexes the instance of the ABSA dataset, and $j$ indexes the sentiment polarity.',\n",
       "  'introduction': 'Introduction\\n\\nA Sa fine-grained task of sentiment analysis, aspect-based sentiment analysis (ABSA) has grown to be an active research task in the community of natural language understanding (NLU) [1], [2], [3]. In particular, ABSA refers to judging the sentiment polarities ( e.g. , positive, neutral, and negative) towards the given aspects, which are usually the target entities appearing in the sentence [4]. Taking the sentence “The food was good, but the service was poor.” as an example, as shown in Fig. 1(a), the goal of ABSA is to predict the polarities “positive” and “negative” for the aspects food and service , respectively.  \\n\\nRecent ABSA modeling approaches are mainly based on deep neural networks (DNNs) owing to the capability of automatically extracting semantic features [5]. Specifically, based on the type of learned feature representations, existing DNNs for ABSA can be classified into two groups: context-based methods [6], [7], [8] and syntax-based methods [9], [10], [11]. Context-based methods first employ convolutional neural networks (CNNs) or long short-term memory networks (LSTMs) to extract the features of aspects and context words and then use the attention mechanism to capture the aspect-specific contextual representations. In addition to contextbased methods, syntax-based methods attempt to model the nonlocal dependency trees (a case in point is shown in Fig. 1(b)) of sentences with graph neural networks, e.g. , graph convolutional networks (GCNs) to encode the syntactic information and syntactically connect the aspects with related opinion words [12].  \\n\\n  \\nFig. 1. (a) An example sentence of the ABSA task from the restaurant reviews. There are two aspects with opposite sentiment polarities in this sentence. (b) Illustration of the dependency parsing result.  \\n\\nMore recently, given effective knowledge, e.g. 
, linguistic and commonsense, for representation approaches in NLU tasks [13], [14], [15], researchers employ external knowledge to augment the semantic features in ABSA models [16], [17], [18], [19]. However, they make extensive modifications to model structures or objectives to encode the different kinds of knowledge, limiting the applicability of their methods to a broader range of tasks and knowledge types. For example, Zhou et al. [17] directly utilized the words ( w.r.t. aspect terms in sentences ) in knowledge graphs as the seed nodes and selected the related nodes to construct the subgraphs. While these subgraph-based methods [17], [20] have achieved remarkable performance, there are still some problems, e.g. , the process of constructing subgraphs is usually relatively complex and would bring more computation, especially when there are many aspect terms. Hence, we attempt to integrate external knowledge from a different perspective.  \\n\\nIn this paper, we propose a novel knowledge graph augmented network, namely, KGAN, to integrate external knowledge for boosting the performance of ABSA task. In general, KGAN employs three parallel branches to learn the feature representations from multiple perspectives ( i.e. , context-, syntax- and knowledgebased). The contextual and syntactic branches are used to extract the explicit context and syntax information from the labeled ABSA data, respectively, as most existing ABSA models do. More specifically, in the knowledge branch, unlike the above previous methods that usually employ complicated approaches to encode the knowledge, we recast them with a simpler and more efficient strategy to incorporate the external knowledge. In practice, instead of directly operating on graph-structure data, we first integrate external knowledge graphs into low-dimensional continuous embeddings, which can be simply and efficiently used to represent sentences and aspects. 
Then, based on the knowledge embeddings, a soft attention mechanism is utilized to capture the aspect-specific knowledge representations. As a result, we can obtain multiple representations that establish the relations between aspects and opinion words from different views. To take full advantage of the complementarity of these multiview representations, we introduce a novel hierarchical fusion module to effectively fuse them.  \\n\\nWe conduct a comprehensive evaluation of KGAN on SemEval2014 ( i.e. , Laptop14 and Restaurant14), SemEval2015 ( i.e. ,Restaurant15), SemEval2016 ( i.e. , Restaurant16) and Twitter benchmarks. The experimental results show that KGAN achieves comparable performance compared to the prior SOTA model with the GloVe-based setting. Moreover, we also investigate and demonstrate the effectiveness and robustness of our KGAN in BERT- and RoBERTa-based settings. In particular, based on RoBERTa, our model achieves the SOTA performance among all datasets in terms of accuracy and macro-F1 score. More specifically, compared to the prior SOTA models, the accuracy improvements of KGAN on Twitter, Restaurant15 and Restaurant15 datasets are up to $2.49\\\\%$ ,$3.28\\\\%$ and $2.06\\\\%$ , respectively. Finally, we also compare KGAN with the other models in terms of latency and model size and prove that KGAN can achieve a good trade-off between efficiency and performance.  \\n\\nThe main contributions can be summarized as follows:  \\n\\n1) We propose a novel knowledge graph augmented network (KGAN), where different types of information are encoded as multiview representations to augment the semantic features, thus boosting the performance of ABSA.   \\n2) To achieve better complementarity between multiview features, we design a novel hierarchical fusion module to effectively fuse them.   \\n3) Experiments on several commonly used ABSA benchmarks show the effectiveness and universality of our proposed KGAN. In combination with pretrained models, i.e. 
,RoBERTa, we achieve new state-of-the-art performance on these benchmarks.  \\n\\nThe rest of this paper is organized as follows. In Sec. 2, we briefly review the related works. In Sec. 3, we introduce our proposed method in detail. Sec. 4 reports and discusses our experimental results. Lastly, we conclude our study in Sec. 5.',\n",
       "  'related_works': 'Related Works\\nS\\n\\n# 2.1 Aspect-based Sentiment Analysis\\nBenefiting from the representation learned from the training data, DNN-based ABSA models have shown promising performance compared to handcrafted feature-based models. We categorize them into two classes, e.g. , context- and syntax-based methods.  \\n\\nFirst, considering the easily obtained contextual information, using CNNs [6], [21], [22], [23], [24] and LSTMs [7], [16], [25], [26], [27], [28] to extract the aspect-specific feature representations from context has become the mainstream approach for ABSA. In particular, owing to the ability to learn sequential patterns, the target-dependent LSTM (TD-LSTM) was proposed by Tang et al. [25] to capture the aspect information. TD-LSTM simplifies connecting the aspect with all context words, neglecting the effect of relative opinion words. Therefore, Wang et al. [26] improved upon the TD-LSTM by introducing an attention mechanism to explore the potential correlations between aspects and opinion words. In the study of Ma et al. [27], two separate LSTMs were used to encode the context and aspect terms, and then an interactive attention mechanism was further proposed to extract the more relevant information between the context and aspect features.  \\n\\nOn the other hand, considering the complexity and inefficiency of LSTM-like sequential models, many studies have attempted to employ more efficient CNNs to capture the compositional structure and n-gram features. Xue and Li [21] proposed a gated convolution network to extract the contextual features and employed the gate mechanism to selectively output the final sentiment features. Huang and Carley [23] introduced two neural units, i.e. ,the parameterized filter and parameterized gate, to incorporate aspect information into CNN. 
Notably, in CNN-based methods, it is common to employ the average of aspect embeddings as the aspect representation, which would cause the loss of sequence information. To address this issue, Li et al. [6] introduced a targetspecific transformation component based on CNNs to better learn the target-specific representation.  \\n\\nHowever, due to the challenge of multiple aspects with different polarities in a sentence, context-based models usually confuse the connections between aspects and related opinion words. To this end, most recent efforts focus on leveraging the syntactic structure of the sentence to effectively establish the connection [9], [10], [11], [12], [29], [30], [31]. In practice, syntactic dependency trees are introduced to represent the sentence, and then GNNs are used to model the dependency trees and encode the syntactic information. Zhang et al. [9] first utilized dependency trees to represent sentences and then proposed graph convolution networks (GCNs) to exploit syntactical information from dependency trees. Additionally, to better connect the aspect and opinion words syntactically, Wang et al. [12] presented a novel aspect-oriented dependency tree structure and employed a relational graph attention network to encode the tree structure. In addition, regarding sentences that have no remarkable syntactic structure, Pang et al. [30] introduced a multichannel GCN to optimally fuse syntactic and semantic information and their combinations simultaneously. Similarly, in the study of Li et al. [11], a dual GCN model that consists of SemGCN and SynGCN modules was used to take advantage of the complementarity of syntax structure and semantic correlations.  \\n\\n  \\nFig. 2. The architecture of our proposed knowledge graph augmented network (KGAN), which leverages external knowledge graphs to augment contextual and syntactic information. 
The $R_{c}$ ,$R_{s}$ and $R_{k}$ denote the context- (left), syntax- (middle) and knowledge-based (right) representations, respectively. In the knowledge branch, ANALOGY and DistMult refer to the approaches of Knowledge Graph Embeddings (KGE). The GloVe/BERT/RoBERTa is used to convert the sentence/aspect into word embeddings.\\n\\n# 2.2 Incorporating External Knowledge\\nSince linguistic and commonsense knowledge can be beneficial to understanding natural language, incorporating this knowledge into deep learning models has become an active topic in many fields [13], [14], [32], [33], [34]. A case in point is the ERNIE [32], which employed the large-scale corpora and knowledge graphs to train a knowledge-enhanced pretraining language model. ERNIE experimentally achieves great performance on various knowledgedriven downstream tasks.  \\n\\nHowever, in the task of ABSA, the existing methods fall short in exploring the knowledge to augment the sentiment analysis. One main reason for this is that the above knowledge is not explicitly expressed in the ABSA datasets. Therefore, some recent studies attempt to incorporate external knowledge to alleviate this issue [16], [17], [18], [20], [35], [36], [37]. Wu et al. [35] proposed a unified model to integrate sentiment and structure knowledge with contextual representations for better performance. Zhou et al. [17] proposed jointly encoding syntactic information and external commonsense knowledge, where the knowledge was sampled via the individual nodes. Moreover, in the study of Xing et al. [34], a knowledge-enhanced BERT was introduced to obtain representations enhanced with sentiment domain knowledge to improve ABSA performance.  \\n\\nFollowing this line of research, we introduce knowledge graphs to explicitly provide external knowledge for ABSA. This idea is relatively similar to AR-BERT [20], which incorporates information on aspect-aspect relations in knowledge graphs to improve the performance of existing ABSA models. 
While AR-BERT [20] can achieve encouraging performance with the help of a large-scale knowledge graph, its main focus is on modeling aspect relations (captured by a complex method) from large knowledge graphs. In contrast, we start from the multiview learning perspective and propose a novel ABSA model that uses a simpler and more efficient strategy to model knowledge graphs. Additionally, instead of only integrating external knowledge with contextual or syntactic information, we synergistically combine the knowledge with both contextual and syntactic information to obtain richer feature representations and effectively boost the performance of sentiment analysis.'},\n",
       " {'paper_id': '6556d23d939a5f4082dbc78f',\n",
       "  'paper_title': 'A Self-enhancement Multitask Framework for Unsupervised Aspect Category Detection',\n",
       "  'abstract': 'Abstract\\n\\nOur work addresses the problem of unsupervised Aspect Category Detection using a small set of seed words. Recent works have focused on learning embedding spaces for seed words and sentences to establish similarities between sentences and aspects. However, aspect representations are limited by the quality of initial seed words, and model performances are compromised by noise. To mitigate this limitation, we propose a simple framework that automatically enhances the quality of initial seed words and selects high-quality sentences for training instead of using the entire dataset. Our main concepts are to add a number of seed words to the initial set and to treat the task of noise resolution as a task of augmenting data for a low-resource task. In addition, we jointly train Aspect Category Detection with Aspect Term Extraction and Aspect Term Polarity to further enhance performance. This approach facilitates shared representation learning, allowing Aspect Category Detection to benefit from the additional guidance offered by other tasks. Extensive experiments demonstrate that our framework surpasses strong baselines on standard datasets.',\n",
       "  'introduction': 'Introduction\\n, our framework proposes SEC as described in Algorithm 1 to obtain $T_{a_{i}}$ . To begin, we generate temporary pseudo labels for all given sentences using the initial seed words. Based on the obtained pseudo-labels, we extract nouns and adjectives (called keywords ) in the sentences for each aspect label and then extract keywords that appear in multiple aspects (called boundary keywords ) and obtained $T_{b}$ . At line 6, we calculate the connection between sentences and initial seed words based on the difference between the similarity of the sentence with its two most similar aspects. Note that, if $C o n n e c t i o n(s)\\\\ge\\\\gamma,$ ,in which $\\\\gamma$ is a hyper-parameter, sentences $s$ are considered to have a certain connection with seed words, and if $C o n n e c t i o n(s)\\\\,<\\\\,\\\\gamma$ , there are uncertain connections. At line 12, we extract keywords from the sentences with uncertain connections and obtain $T_{u}$ . Finally, the intersection of $T_{b}$ and $T_{u}$ will be mapped to the relevant aspect. We utilize a variant of clarity scoring function (CronenTownsend et al. ,2002 ) for the automatic mapping. Clarity measures the likelihood of observing a word $w$ in the subset of sentences related to aspect $a_{i}$ , as compared to $a_{j}$ . A higher score indicates a greater likelihood of word w being related to aspect $a_{i}$ .  \\n\\n<html><body><table><tr><td>Algorithm 1: Seedword Enhancement Component (SEC)</td></tr><tr><td>Input: sentence set S, initial seed word set H, threshold </td></tr><tr><td>Output: additional seed word set Ta</td></tr><tr><td>1 begin P ← Pseudo-Label-Generation(S, H) 2</td></tr><tr><td>with Eq. 
1; 3 T←Boundary-Keywords-</td></tr><tr><td>Extraction(S, P) ;</td></tr><tr><td>Su←α Initialize set of sentences with uncertain pseudo-label;</td></tr><tr><td>5 for s E S do</td></tr><tr><td>Calculate Connection(s) ; 6</td></tr><tr><td>7 if Connection(s) < then</td></tr><tr><td>8 Add (s) to Su;</td></tr><tr><td>6 end</td></tr><tr><td>10 end 11 Pu ← Pseudo label of Su;</td></tr><tr><td>12 Tu←Keywords-Extraction(Su, Pu）</td></tr><tr><td>Extract keywords from sentences with</td></tr><tr><td>uncertain prediction;</td></tr><tr><td>13 T←T∩Tu ;</td></tr><tr><td>14 Ta ← Auto mapping(T); 15 return Ta; 16end</td></tr></table></body></html>  \\n\\n$$\\nc l a r i t y_{(a_{i},a_{j})}(w)=t_{a_{i}}(w)l o g\\\\frac{t_{a_{i}}(w)}{t_{a_{j}}(w)}\\n$$  \\n\\nwhere $t_{a_{i}}(w)$ and $t_{a_{j}}(w)$ correspond to the $l_{1}$ -normalized TF-IDF scores of $w$ in the sentences annotated pseudo-label with aspect $a_{i}$ and $a_{j}$ , respectively.  \\n\\nIn the training process, after obtaining pseudo labels, SEC recalculates the certainty of connections similar to lines 5 to 10 of Algorithm 1 , then removes uncertain connections $S_{u}$ out of $S$ .  \\n\\nAspect Term Extraction: We extract aspect terms by considering all nouns that appear more than $m$ times in the corpus.  \\n\\nAspect Term Polarity: After generating aspect term pseudo-labels, we find polarity pseudo-labels of terms based on the context window around them. In detail, the generation will be carried out similarly to the ACD subtask with the input being the context window and polarity seed words.',\n",
       "  'related_works': 'Related Works\\ns\\nTopic models were once the dominant approach (Brody and Elhadad ,2010 ;Mukherjee and Liu ,2012 ;Chen et al. ,2014 ) for unsupervised Aspect Category Detection. However, they can produce incoherent aspects. Recently, neural network-based methods have been developed to address this challenge.  \\n\\nCluster Mapping-based resolvers: These methods utilize neural networks to cluster effectively and manually map (many-to-many mapping) the clusters to their corresponding aspects. They employ attention-based autoencoders ( He et al. ,2017 ;Luo et al. ,2019 ) or contrast learning approach ( Shi et al. ,2021 ) for clustering. Shi et al. (2021 ) further enhance performance by using knowledge distillation to learn labels generated after clustering.  \\n\\nSeed words-based resolvers: These approaches automate the aspect category mapping process by utilizing seed words that indicate aspect appearance. Angelidis and Lapata (2018 ) use the weighted sum of seed word representations as aspect representations, allowing mapping one-to-one in the auto-encoder model. Recent works focus on learning embedding spaces for sentences and seed words, generating pseudo labels for weakly supervised learning. They use Skip-gram ( Mikolov et al. ,2013 ) for embedding space learning and convolutional neural networks or linear layers for classification ( Huang et al. ,2020 ;Nguyen et al. ,2021 ). Huang et al. (2020 ) jointly learn ACD with Sentence-level ATP, while Nguyen et al. (2021 )consider the uncertainty of the initial embedding space. Without any human supervision, ( Tulkens and van Cranenburgh ,2020 ;Li et al. ,2022 ) rely solely on label names, similar to seed words. Tulkens and van Cranenburgh (2020 ) detect aspects using cosine similarity between pre-trained aspect and label name representations, while Li et al. 
(2022 ) train the clustering model with instancelevel and concept-level constraints.\\n\\n# 3 Method\\nOur framework addresses three tasks for which no annotated data is available: Aspect Category Detection (ACD), Aspect Term Extraction (ATE), and Aspect Term Polarity (ATP). ACD involves assigning a given text to one of K pre-defined aspects of interest. ATE extracts OTEs in the text. ATP assigns a sentiment to each OTE. Note that, during training, we do not use any human-annotated samples, but rather rely on a small set of seed words to provide supervision signals.  \\n\\nOur framework called ASeM (short for A S elfenhancement Mutitask Framework), consists of three key components: (i) Pseudo-label generation, (ii) Retrieval-based data augmentation, and (iii) Classification. Figure 1 presents an overview of the framework. Initially, we extract a small subset of the training data to serve as the task-specific in-domain data. Based on the quality of the initial seed words in this dataset, we utilize SEC to expand the set of seed words in order to enhance its quality. By feeding the task-specific in-domain data and enhanced seed words to the pseudo-label generation, we obtain high-quality pseudo labels for the task-specific in-domain data. Then, we leverage the retrieval-based augmentation to enhance the number of training samples from the data bank (the remaining part of the training data), based on our prior knowledge of the target task (seed words, taskspecific in-domain data with high-quality pseudo labels). To this end, the high-quality pseudo labels and augmented data are passed through a multitask classifier to predict the task outputs.  \\n\\n  \\nFigure 1: Overview of our proposed Self-enhancement Multitask (ASeM) framework.\\n\\n# 3.1 Pseudo-label generation\\nThe first step in our framework is to generate pseudo-labels for the three subtasks ACD, ATE, and ATP, on a small unannotated in-domain dataset. 
In detail, the pseudo-labels for the tasks are created as follows:  \\n\\nAspect Category Detection: First, we map dictionary words into an embedding space by training CBOW ( Mikolov et al. ,2013 ) on the unlabeled training data. Second, we embed sentences from the task-specific in-domain data as $\\\\pmb{s}=s u m(\\\\pmb{w}_{1},\\\\pmb{w}_{2},..,\\\\pmb{w}_{n})$ , in which $\\\\pmb{w}_{i}$ is the representation of the $i^{\\\\mathrm{th}}$ word and $n$ is the sentence length. Similarly, the aspect category representation $\\\\pmb{a}_{i}\\\\,=\\\\,s u m(\\\\pmb{w}_{i,1}^{(a)},\\\\pmb{\\\\dot{w}}_{i,2}^{(a)},..,\\\\pmb{w}_{i,l_{i}}^{(a)})$ , in which $w_{i j}^{(a)}$ is the representation of the $j^{\\\\mathrm{th}}$ seed word of the $i^{\\\\mathrm{th}}$ aspect, and $l_{i}$ is the number of seed words in the $i^{\\\\mathrm{th}}$ aspect. To this end, aspect category pseudo label of a sentence $s$ is defined as follows:  \\n\\n$$\\ny=a r g m a x(s i m(s,a_{i})),1\\\\leq i\\\\leq K\\n$$  \\n\\nwhere $s i m(s,a_{i})$ is the similarity between sentence $s$ aspect $a_{i}$ .en set $G_{a_{i}}=T_{a_{i}}\\\\cup H_{a_{i}},1\\\\le$ $i\\\\leq K$ words, ≤in which $T_{a_{i}}$ is the set of additional seed words. The $H_{a_{i}}$ is the set of given initial seed similarity is calculated as follows:  \\n\\n$$\\ns i m(s,a_{i})=\\\\left\\\\{{\\\\underset{w\\\\in s^{\\\\prime}\\\\cap G_{a_{i}}}{\\\\sum}}\\\\mathbf{w}^{T}\\\\mathbf{s},\\\\quad{\\\\mathrm{if~}}s^{\\\\prime}\\\\cap G_{a_{i}}\\\\neq\\\\emptyset\\n$$  \\n\\nwhere sand a are sentences and aspect representations, respectively. $w$ and $\\\\mathbf{w}$ are a word in a sentence and its representation. $s^{\\\\prime}$ is the set of words in the sentence $s$ .  \\n\\nAs discussed in the introduction, our framework proposes SEC as described in Algorithm 1 to obtain $T_{a_{i}}$ . To begin, we generate temporary pseudo labels for all given sentences using the initial seed words. 
Based on the obtained pseudo-labels, we extract nouns and adjectives (called keywords ) in the sentences for each aspect label and then extract keywords that appear in multiple aspects (called boundary keywords ) and obtained $T_{b}$ . At line 6, we calculate the connection between sentences and initial seed words based on the difference between the similarity of the sentence with its two most similar aspects. Note that, if $C o n n e c t i o n(s)\\\\ge\\\\gamma,$ ,in which $\\\\gamma$ is a hyper-parameter, sentences $s$ are considered to have a certain connection with seed words, and if $C o n n e c t i o n(s)\\\\,<\\\\,\\\\gamma$ , there are uncertain connections. At line 12, we extract keywords from the sentences with uncertain connections and obtain $T_{u}$ . Finally, the intersection of $T_{b}$ and $T_{u}$ will be mapped to the relevant aspect. We utilize a variant of clarity scoring function (CronenTownsend et al. ,2002 ) for the automatic mapping. Clarity measures the likelihood of observing a word $w$ in the subset of sentences related to aspect $a_{i}$ , as compared to $a_{j}$ . A higher score indicates a greater likelihood of word w being related to aspect $a_{i}$ .  \\n\\n<html><body><table><tr><td>Algorithm 1: Seedword Enhancement Component (SEC)</td></tr><tr><td>Input: sentence set S, initial seed word set H, threshold </td></tr><tr><td>Output: additional seed word set Ta</td></tr><tr><td>1 begin P ← Pseudo-Label-Generation(S, H) 2</td></tr><tr><td>with Eq. 
1; 3 T←Boundary-Keywords-</td></tr><tr><td>Extraction(S, P) ;</td></tr><tr><td>Su←α Initialize set of sentences with uncertain pseudo-label;</td></tr><tr><td>5 for s E S do</td></tr><tr><td>Calculate Connection(s) ; 6</td></tr><tr><td>7 if Connection(s) < then</td></tr><tr><td>8 Add (s) to Su;</td></tr><tr><td>6 end</td></tr><tr><td>10 end 11 Pu ← Pseudo label of Su;</td></tr><tr><td>12 Tu←Keywords-Extraction(Su, Pu）</td></tr><tr><td>Extract keywords from sentences with</td></tr><tr><td>uncertain prediction;</td></tr><tr><td>13 T←T∩Tu ;</td></tr><tr><td>14 Ta ← Auto mapping(T); 15 return Ta; 16end</td></tr></table></body></html>  \\n\\n$$\\nc l a r i t y_{(a_{i},a_{j})}(w)=t_{a_{i}}(w)l o g\\\\frac{t_{a_{i}}(w)}{t_{a_{j}}(w)}\\n$$  \\n\\nwhere $t_{a_{i}}(w)$ and $t_{a_{j}}(w)$ correspond to the $l_{1}$ -normalized TF-IDF scores of $w$ in the sentences annotated pseudo-label with aspect $a_{i}$ and $a_{j}$ , respectively.  \\n\\nIn the training process, after obtaining pseudo labels, SEC recalculates the certainty of connections similar to lines 5 to 10 of Algorithm 1 , then removes uncertain connections $S_{u}$ out of $S$ .  \\n\\nAspect Term Extraction: We extract aspect terms by considering all nouns that appear more than $m$ times in the corpus.  \\n\\nAspect Term Polarity: After generating aspect term pseudo-labels, we find polarity pseudo-labels of terms based on the context window around them. In detail, the generation will be carried out similarly to the ACD subtask with the input being the context window and polarity seed words.'},\n",
       " {'paper_id': '634e194790e50fcafd24f36d',\n",
       "  'paper_title': 'PeerDA: Data Augmentation Via Modeling Peer Relation for Span Identification Tasks.',\n",
       "  'abstract': 'Abstract\\n\\nSpan identification aims at identifying specific text spans from text input and classifying them into pre-defined categories. Different from previous works that merely leverage the Subordinate (S UB ) relation (i.e. if a span is an instance of a certain category ) to train models, this paper for the first time explores the Peer (P R) relation, which indicates that two spans are instances of the same category and share similar features .Specifically, a novel Peer Data A ugmentation (PeerDA) approach is proposed which employs span pairs with the P Rrelation as the augmentation data for training. PeerDA has two unique advantages: (1) There are a large number of P Rspan pairs for augmenting the training data. (2) The augmented data can prevent the trained model from overfitting the superficial span-category mapping by pushing the model to leverage the span semantics. Experimental results on ten datasets over four diverse tasks across seven domains demonstrate the effectiveness of PeerDA. Notably, PeerDA achieves state-of-the-art results on six of them.',\n",
       "  'introduction': 'Introduction\\n\\nSpan Identification (SpanID) is a family of Natural Language Processing (NLP) tasks with the goal of detecting specific spans from the input text and further classifying them into pre-defined categories ( Papay et al. ,2020 ). It serves as the initial step for complex text analysis by narrowing down the search scopes of important spans, which holds a pivotal position in the field of NLP ( Ding et al. ,2021 ;Xu et al. ,2021 ). Recently, different domain-specific SpanID tasks, such as social media Named Entity Recognition (NER) ( Derczynski et al. ,2017 ), Aspect-Based Sentiment Analysis (ABSA) ( Liu ,2012 ), Contract Clause Extraction (CCE) ( Chalkidis et al. ,2017 ), Span Based Propaganda Detection (SBPD) ( Da San Martino et al. ,2019 ) and Argument Extraction ( Cheng et al. ,2020 ), have emerged for various NLP applications.  \\n\\n  \\nFigure 1: (a) Illustrations of Subordinate (S UB ) and Peer (P R) relations in SpanID tasks. (b) The constructions of augmented data with P Rrelations in MRC paradigm. We use NER here for demonstration purposes.  \\n\\nPrecisely, as shown in Figure 1 (a), the process of SpanID can be reinterpreted as extracting spancategory Subordinate (S UB ) relation — if a span in the input text is an instance of a certain category .Early works ( Chiu and Nichols ,2016 ) typically tackle SpanID tasks as a sequence tagging problem, where the S UB relation is recognized via predicting the category for each input token under certain context. Recently, to better utilize category semantics, many efforts have been made on reformulating SpanID tasks as a Machine Reading Comprehension (MRC) problem ( Liu et al. ,2020 ;Yang et al. ,2021 ). As shown by the example in Figure 1 (b), such formulation first creates a S UB query for each category and then recognizes the S UB relation by detecting relevant spans in the input text ( i.e. , context) as answers to the category query.  
\\n\\nHowever, only leveraging the S UB relation in the training data to build SpanID models may suffer from two limitations: 1) Over-fitting : With only S UB relation, SpanID models tend to capture the superficial span-category correlations. Such correlations may misguide the models to ignore the semantics of the given span but make predictions based on the memorized span-category patterns, which hurts the generalization capability of the models. 2) Data Scarcity : For low-resource scenarios or long-tailed categories, the number of span-category pairs with S UB relation (S UB pairs) could be very limited and insufficient to learn a reliable SpanID model.  \\n\\nIn this paper, we explore the span-span Peer (P R) relation to alleviate the above limitations. Specifically, the P Rrelation indicates that two spans are two different instances of the same category . The major difference between P Rrelation and S UB relation is that the former one intends to correlate two spans without giving the categories they belong to. For example, in Figure 1 (a), \"Hawaii\" and \"London\" are connected with the PRrelation because they are instances of the same category. By jointly recognizing S UB relation and PRrelation in the input text, the model is enforced to favor the usage of span semantics instead of span-category patterns for prediction, reducing the risk of over-fitting. In addition, the number of spanspan pairs with the P Rrelation (P Rpairs) grows quadratically over the number of S UB pairs. Therefore, we can still construct a reasonable number of training data with P Rpairs for categories having insufficient examples.  \\n\\nIn this paper, with the aim of leveraging the P Rrelation to enhance SpanID models, we propose a Peer Data Augmentation ( PeerDA ) approach that treats P Rpairs as a kind of augmented training data. To achieve this, as depicted in Figure 1 (b), we extend the usage of the original training data into two views. 
The first view is the S UB -based training data. It is used to directly solve the SpanID tasks by extracting the S UB relation, which is the typical formulation of MRC-based approaches. The second view is the P R-based training data. It is our augmentation to enrich the semantics of spans by extracting the P Rrelation in the original training data, where one span is used to identify its peer from the input context. Note that our P R-based training data can be easily formulated into the MRC paradigm. Therefore, the knowledge learned from such augmentation data can be directly transferred to enhance the model’s capability to capture S UB relation ( i.e. , the SpanID tasks).  \\n\\nTo better accommodate the MRC-style S UB and PRdata, we develop a stronger and more memoryefficient MRC model. Compared to the designs in Li et al. (2020b ), our model introduces a bilinear component to calculate the span scores and consistently achieves better performance with a 4 times smaller memory consumption. Besides, we propose a margin-based contrastive learning strategy to additionally model the negative spans to the query ( e.g. , when querying the context in Figure 1 for “ORG” entities, “London” becomes a negative span) so that the spans from different categories are separated more apart in the semantic space.  \\n\\nWe evaluate the effectiveness of PeerDA on ten datasets across seven domains, from four different SpanID tasks, namely, NER, ABSA, CCE, and SBPD. Experimental results show that extracting P Rrelation benefits the learning of semantics and encourages models to identify more possible spans. As a result, PeerDA is a new state-of-the-art (SOTA) method on six SpanID datasets. Our analyses further demonstrate the capability of PeerDA to alleviate scarcity and over-fitting issues.  \\n\\nOur contributions are summarized as follows:  \\n\\n•We propose a novel PeerDA approach to tackle SpanID tasks via augmenting training data with PRrelation.   
\\n•We conduct extensive experiments on ten datasets, including four different SpanID tasks across seven domains, and achieve SOTA performance on six SpanID datasets.   \\n•PeerDA is more effective in low-resource scenarios or long-tailed categories and thus, it alleviates the scarcity issue. Meanwhile, jointly recognizing the SUB and PR relations makes the MRC model rely less on memorizing the SUB patterns in the training set for inferring the span label, which prevents over-fitting.',\n",
       "  'related_works': 'Related Works\\n. For Social21 , we compare with top three approaches on its leaderboard, namely, Volta ( Gupta et al. ,2021 ), HOMADOS ( Kaczy´nski and Przybyła ,2021 ), and TeamFPAI ( Hou et al. ,2021 ).  \\n\\nCCE: We compare with (1) MRC basline, (2) stronger text encoders, including ALBERT ( Lan et al. ,2019 ) and DeBERTa ( He et al. ,2020 ), (3) the model continually pretrained on contracts: RoBERTa $+\\\\,\\\\mathrm{CP}$ (Hendrycks et al. ,2021 ) and (4) the model leveraged the contract structure: ConReader (Xu et al. ,2022a ).\\n\\n# 5 Results\\n\\n# 5.1 Comparison Results\\nNER: Table 2 shows the performance on five NER datasets. Our PeerDA significantly outperforms the Tagging and MRC baselines. Precisely, compared to RoBERTa base MRC, PeerDA obtains 0.3, 6.0, 3.2, 1.5, and $2.9~\\\\mathrm{F_{1}}$ gains on five datasets respectively. When implemented on RoBERTa large , our PeerDA can further boost the performance and establishes new SOTA on three datasets, namely, OntoNotes5 ,Movie , and Restaurant . Note that the major improvement of PeerDA over MRC comes from higher Recall. It implies that PeerDA encourages models to give more span predictions.  \\n\\n<html><body><table><tr><td rowspan=\"2\">Methods</td><td colspan=\"2\">Lap14</td><td colspan=\"2\">Rest14</td></tr><tr><td>UABSA</td><td>ATE</td><td>UABSA</td><td>ATE</td></tr><tr><td>SPAN-BERT</td><td>61.3</td><td>82.3</td><td>73.7</td><td>86.7</td></tr><tr><td>IMN-BERT</td><td>61.7</td><td>77.6</td><td>70.7</td><td>84.1</td></tr><tr><td>RACL</td><td>63.4</td><td>81.8</td><td>75.4</td><td>86.4</td></tr><tr><td>Dual-MRC</td><td>65.9</td><td>82.5</td><td>76.0</td><td>86.6</td></tr><tr><td>MRC (Large)</td><td>63.2</td><td>83.9</td><td>72.9</td><td>86.8</td></tr><tr><td>PeerDA</td><td>65.9</td><td>84.6</td><td>73.9</td><td>86.8</td></tr></table></body></html>  \\n\\nTable 3: Performance on two ABSA subtasks on two datasets. Results are averages $\\\\mathrm{F_{1}}$ over 5 runs.   
\\nTable 4: PeerDA performance on two SBPD datasets.   \\n\\n\\n<html><body><table><tr><td rowspan=\"2\">Methods</td><td colspan=\"3\">News20</td><td colspan=\"3\">Social21</td></tr><tr><td>P</td><td>R</td><td>F1</td><td>P</td><td>R</td><td>F1</td></tr><tr><td>Volta</td><td></td><td></td><td></td><td>50.1</td><td>46.4</td><td>48.2</td></tr><tr><td>HOMADOS</td><td></td><td></td><td></td><td>41.2</td><td>40.3</td><td>40.7</td></tr><tr><td>TeamFPAI</td><td></td><td></td><td></td><td>65.2</td><td>28.6</td><td>39.7</td></tr><tr><td>MRC(Base)</td><td>10.5</td><td>53.5</td><td>17.6</td><td>55.8</td><td>43.5</td><td>48.9</td></tr><tr><td>PeerDA</td><td>21.8</td><td>31.5</td><td>25.8</td><td>49.4</td><td>70.6</td><td>58.1</td></tr></table></body></html>  \\n\\nABSA: Table 3 depicts the results on ABSA. Compared to previous approaches, PeerDA mostly achieves better results on two subtasks, where it outperforms vanilla MRC by 2.7 and $1.0\\\\ \\\\mathrm{F_{1}}$ on UABSA for two domains respectively.  \\n\\nSBPD: The results of two SBPD tasks are presented in Table 4 . PeerDA outperforms MRC by 8.2 and $9.2\\\\,\\\\mathrm{F_{1}}$ and achieves SOTA performance on News20 and Social21 respectively.  \\n\\nCCE: The results of CCE are shown in Table 5 .PeerDA surpasses MRC by 8.7 AUPR and $13.3\\\\;\\\\mathrm{P}@0.8\\\\mathrm{R}$ and even surpasses the previous best model of larger size ( ConReader large ) by 3.2 AUPR, reaching SOTA performance on CUAD .\\n\\n# 5.2 Analysis on Augmentation Strategies\\nTo explore how the size and category distribution of the augmented data affect the SpanID tasks, we conduct ablation study on the three augmenTable 6: Ablation study on data augmentation strategies. The results $\\\\mathrm{F_{1}}$ for NER, UABSA, and SBPD. AUPR for CCE) are averaged of all datasets in each task.  
\\n\\n<html><body><table><tr><td>Methods</td><td>#Params</td><td>AUPR</td><td>P@0.8R</td></tr><tr><td>ALBERTxxlarge</td><td>223M</td><td>38.4</td><td>31.0</td></tr><tr><td>RoBERTabase + CP</td><td>125M</td><td>45.2</td><td>34.1</td></tr><tr><td>RoBERTalarge</td><td>355M</td><td>48.2</td><td>38.1</td></tr><tr><td>DeBERTaxlarge</td><td>900M</td><td>47.8</td><td>44.0</td></tr><tr><td>ConReaderlarge</td><td>355M</td><td>49.1</td><td>44.2</td></tr><tr><td>MRC(Base)</td><td>125M</td><td>43.6</td><td>32.2</td></tr><tr><td>PeerDA</td><td>125M</td><td>52.3</td><td>45.5</td></tr></table></body></html>  \\n\\nTable 5: PeerDA performance on CCE.   \\n\\n\\n<html><body><table><tr><td>Ablation Type</td><td>NER</td><td>UABSA</td><td>SBPD</td><td>CCE</td><td>Avg.</td></tr><tr><td>MRC</td><td>72.7</td><td>68.1</td><td>33.3</td><td>43.6</td><td>54.4</td></tr><tr><td>PeerDA-Size</td><td>74.6</td><td>69.7</td><td>38.5</td><td>48.7</td><td>57.9</td></tr><tr><td>PeerDA-Categ</td><td>74.2</td><td>69.3</td><td>40.4</td><td>51.3</td><td>58.8</td></tr><tr><td>PeerDA-Both (final)</td><td>75.5</td><td>69.9</td><td>42.0</td><td>52.3</td><td>59.9</td></tr></table></body></html>  \\n\\ntation strategies mentioned in Sec. 3.1.2 , depicted in Table 6 . Overall, all of the PeerDA variants are clearly superior to the MRC baseline and the PeerDA-both considering both data size and distribution issues performs the best. Another interesting finding is that PeerDA-Categ significantly outperforms PeerDA-Size on SBPD and CCE. We attribute the phenomenon to the fact that SBPD and CCE have a larger number of categories and consequently, the MRC model is more prone to the issue of skewed data distribution. Under this circumstance, PeerDA-Categ, the variant designed for compensating the long-tailed categories, can bring larger performance gains over MRC model. On the other hand, if the skewed data distribution is not severe (e.g. NER), or the category shows a weak correlation with the spans (i.e. 
UABSA), PeerDA-Size is more appropriate than PeerDA-Categ.'},\n",
       " {'paper_id': '6433f67f90e50fcafd6db326',\n",
       "  'paper_title': 'UTC-IE: A Unified Token-pair Classification Architecture for Information Extraction',\n",
       "  'abstract': 'Abstract\\n\\nInformation Extraction (IE) spans several tasks with different output structures, such as named entity recognition, relation extraction and event extraction. Previously, those tasks were solved with different models because of diverse task output structures. Through re-examining IE tasks, we find that all of them can be interpreted as extracting spans and span relations. They can further be decomposed into tokenpair classification tasks by using the start and end token of a span to pinpoint the span, and using the start-to-start and end-to-end token pairs of two spans to determine the relation. Based on the reformulation, we propose a Unified Token-pair Classification architecture for I nformation Extraction ( UTC-IE ), where we introduce Plusformer on top of the tokenpair feature matrix. Specifically, it models axis-aware interaction with plus-shaped selfattention and local interaction with Convolutional Neural Network over token pairs. Experiments show that our approach outperforms task-specific and unified models on all tasks in 10 datasets, and achieves better or comparable results on 2 joint IE datasets. Moreover, UTCIE speeds up over state-of-the-art models on IE tasks significantly in most datasets, which verifies the effectiveness of our architecture.',\n",
       "  'introduction': 'Introduction\\n\\nInformation Extraction (IE) aims to identify and classify structured information from unstructured texts ( Andersen et al. ,1992 ;Grishman ,2019 ). IE consists of a wide range of tasks, such as named entity recognition (NER), joint entity relation extraction $(\\\\mathrm{RE})^{2}$ and event extraction (EE) .  \\n\\nIn the last decade, many paradigms have been proposed to solve IE tasks, such as sequence labeling ( McCallum and Li ,2003 ;Huang et al. ,2015 ;Zheng et al. ,2017 ;Yu et al. ,2020a ), span-based classification ( Jiang et al. ,2020 ;Yu et al. ,2020b ;Wang et al. ,2021 ;Ye et al. ,2022 ), MRC-based methods ( Levy et al. ,2017 ;Li et al. ,2020 ;Liu et al. ,2020 ) and generation-based methods ( Zeng et al. ,2018 ;Yan et al. ,2021a ;Hsu et al. ,2022 ). The above work mainly concentrates on solving individual tasks, but it is desired to unify all IE tasks without designing dedicated modules, as tackling all IE tasks with one model can facilitate knowledge sharing between different tasks. Therefore, various attempts have been made to unify all IE tasks with one model structure. Wadden et al. (2019 ); Lin et al. (2020 ); Nguyen et al. (2021 ) encode all IE tasks’ target structure as graphs and design graph-based methods to predict them; Paolini et al. (2021 ); Lu et al. (2022 ) solve general IE tasks in a generative way with text-to-text or text-to-structure frameworks. However, graph-based models tend to be complex to design, while generative models are time-consuming to decode.  \\n\\nIn our work, we creatively propose a simple yet effective paradigm for unified IE. Inspired by Jiang et al. (2020 ), we re-examine IE tasks and consider that all of them are fundamentally span extraction (entity extraction in NER and RE, trigger extraction and argument span detection in EE) or relational extraction 3 (relation extraction in RE and argument role classification in EE). 
Based on this perspective, we further simplify and unify all IE tasks into tokenpair classification tasks . Figure 1 shows how each task can be converted. Specifically, a span is decomposed into start-to-end and end-to-start token pairs. As depicted, the entity “School of Computer Science” in Figure 1 (a) is decomposed into indices of (School, Science) and (Science, School). As for detecting the relation between two spans, we convert it into start-to-start and end-to-end token pairs from head mention to tail mention. For example, in Figure 1 (b), the relation “Author” between “J.K. Rowling” and “Harry Potter novels” is decomposed into indices of (J.K., Harry) and (Rowling, novels).  \\n\\n  \\nFigure 1: An illustration of the token-pair decomposition for IE tasks. Each cell represents one token pair, and it can be classified into pre-defined types. $e,r,t,a$ and $r o l$ in figures mean entity, relation, event trigger, event argument and event role. For the span extraction, we use the start-to-end and end-to-start token pairs to pinpoint the span, such as entity spans $e_{1},e_{2}$ , argument spans $a_{1},a_{2}$ and trigger span $t$ (cells with pure color). For the relational extraction, we use the start-to-start and end-to-end token pairs to represent the relation, such as $r$ and $r o l_{1},r o l_{2}$ (cells with gradient color). It is worth mentioning that both relations and event roles are regarded as directional, namely from start entity to end entity and from event trigger to argument spans. Therefore, all IE tasks can be decomposed into token-pair classifications. After the reformulation, the local dependency and interaction from the plus-shaped orientation (as the orange and blue dotted lines depict) can provide vital information to classify the central token pair.  \\n\\nBased on the above decomposition, we propose a Unified Token-pair Classification architecture for I nformation Extraction ( UTC-IE ). 
Specifically, we first apply Biaffine model on top of the pre-trained language model (PLM) to get representations of token pairs. Then we design a novel Transformer to obtain interactions between them. As the plusshaped dotted lines depicted in Figure 1 , token pairs in horizontal and vertical directions cover vital information for the central token pair. For span extraction, token pairs in the plus-shaped orientation are either clashing or nested with the central token pair, for example, $e_{2}$ is contained by $e_{1}$ in Figure 1 (a); for relational extraction, the central token pair’s two constituent mentions locate in the plus-shaped orientation, such as in Figure 1 (b), $r$ is determined by $e_{1}$ and $e_{2}$ . Therefore, we make one token pair only attend horizontally and vertically in the token pair feature matrix. Additionally, position embeddings are incorporated to keep the token pairs position-aware. Moreover, neighboring token pairs are highly likely to be informative to determine the types of the central token pair, so we apply Convolutional Neural Network (CNN) to model the local interaction after the plus-shaped attention. Since the attention map for one token pair is intuitively similar to the plus operator, we name this whole novel module as Plusformer .  \\n\\nWe conduct numerous experiments in two settings. When training separately on each task (named as single IE task ), our model outperforms previous task-specific and unified models on 10 datasets of all IE tasks. When training a single model simultaneously on all IE tasks in one dataset (named as joint IE task ), UTC-IE achieves better or comparable results than 2 joint IE baselines. To thoroughly analyze why UTC-IE is useful under the token-pair paradigm, we execute several ablation studies. We observe that CNN module in Plusformer plays a significant role in IE tasks by the abundant local dependency between token pairs after the reformulation. 
Besides, owing to the good parallelism of self-attention and CNN, UTC-IE is one to two orders of magnitude faster than prior unified IE models and some task-specific work. To summarize, our key contributions are as follows 1. We introduce UTC-IE, which decomposes all IE tasks into token-pair classification tasks .  \\n\\nIn this way, we can unify all single IE tasks under the same task formulation, and use one model to fit all IE tasks without designing taskspecific modules. Besides, this unified decomposition is much faster than recently proposed generation-based unified frameworks.  \\n\\n2. After the reformulation of different IE tasks, we propose the Plusformer to model interaction between different token pairs. The plusshaped self-attention and CNN in Plusformer are well-motivated and effective in the reformulated IE scenario. Experiments in 12 IE datasets all achieve state-of-the-art (SOTA) performance which justifies the superiority of Plusformer in IE tasks.  \\n\\n3. The reformulation enables us to use one model to fit all IE tasks concurrently. Therefore, we can train one model on three IE tasks, and results on two joint IE datasets show that the proposed unification can effectively benefit each IE task through multi-task learning.  \\n\\n4. Extensive ablation experiments reveal that components in Plusformer are necessary and beneficial. Among them, CNN module in Plusformer can be essential to the overall performance. Analysis shows that this performance gain is well-explained because when reformulating IE tasks into token-pair classifications, the adjacent token pairs can be informative and CNN can take good advantage of the local dependency between them.',\n",
       "  'related_works': 'Related Works\\n\\nInformation extraction tasks, which consist of named entity recognition, relation extraction, and event extraction, have long been a fundamental and well-researched task in the natural language processing (NLP) field. Previous researches mainly only focus on one or two tasks. Recently, building joint neural models of unified IE tasks has attracted increasing attention. Some of them incorporate graphs into IE structure. Wadden et al. (2019 )propose a unified framework called DYGIE $^{++}$ to extract entities, relations and events by leveraging span representations via span graph updates. Lin et al. (2020 ) and Nguyen et al. (2021 ) extend $\\\\mathrm{DYGIE++}$ by incorporating global features to extract cross-task and cross-instance interactions with multi-task learning. In addition to the graph-based models mentioned above, other studies focus on tackling general IE by generative models. Paolini et al. (2021 ) construct a framework called TANL, which enhances the generation model using augmented language methods. Moreover, Lu et al. (2022 ) regard IE task as a text-to-structure generation task, and leveraging prompt mechanism.  \\n\\nTable 4: Ablation studies in the NER, RE and EE datasets. CNN-IE is similar to UTC-IE except that it is deprived of the PlusAttention. Underlines mean the most dropped factor. ♣means that the CNN-IE surpasses previous SOTA performance.   
\\n\\n\\n<html><body><table><tr><td></td><td>ACE05-Ent</td><td colspan=\"2\">ACE05-Rbert</td><td colspan=\"2\">ACE05-E+</td></tr><tr><td></td><td>Ent.</td><td>Ent.</td><td>Rel.</td><td>Trig.</td><td>Arg.</td></tr><tr><td>UTC-IE</td><td>87.7535</td><td>88.8212</td><td>64.9433</td><td>73.4455</td><td>57.6878</td></tr><tr><td>-CNN</td><td>87.3922</td><td>88.7122</td><td>63.5583</td><td>72.9834</td><td>56.7499</td></tr><tr><td>- positon embeddings</td><td>87.5334</td><td>88.7320</td><td>64.2956</td><td>73.1298</td><td>57.0280</td></tr><tr><td>-axis-aware</td><td>87.5927</td><td>88.7919</td><td>63.9155</td><td>73.2946</td><td>56.8798</td></tr><tr><td>CNN-IE</td><td></td><td>88.706</td><td>64.6726 26</td><td>73.0499</td><td>56.9763</td></tr></table></body></html>  \\n\\nWe unify all IE tasks as several token-pair classification tasks, which are fundamentally similar to the span-based methods on the IE task, for the start and end tokens can locate a span. Numerous NER studies emerge on span-based models, which are compatible with both flat and nested entities and perform well ( Eberts and Ulges ,2020 ;Yu et al. ,2020b ;Li et al. ,2021 ;Zhu and Li ,2022 ). In addition to entities, the span-based method is also used in RE. Some models ( Wang et al. ,2021 ;Ye et al. ,2022 ) only leverage span representations to locate entities and simply calculate the interaction between entity pair, while others ( Wang et al. ,2020 ;Zhong and Chen ,2021 ) encode span pair information explicitly to extract relations. With regard to event extraction, as far as we know, there is little work on injecting span information into EE explicitly. Wadden et al. (2019 ) leverage span representations on general IE, but their model is complicated and only considers span at the embedding layer without further interaction. Conceptually, Jiang et al. (2020 )’s work is similar to ours, but they need a two-stage model to determine the span type and span relations, respectively. 
Detailed analysis are depicted in Appendix G. Although many spanbased IE models exist, they are task-specific and lack interaction between token pairs. Decomposing IE tasks as token-pair classification and conducting interaction between token pairs can uniformly model span-related knowledge and advance SOTA performance.  \\n\\nThe most novel component of Plusformer is the plus-shaped attention mechanism, which can make token pairs interact with each other in an efficient way. A similar structure called Axial Transformers ( Ho et al. ,2019 ) is proposed in the Computer Vision (CV) field, which is designed to deal with data organized as high-dimension tensors. Tan et al. (2022 ) incorporate axial attention into relation classification to improve the performance on two-hop relation. However, CNN was not used in these works, while CNN has been proven to be vital to the IE tasks. Another similar structure named Twin Transformer ( Guo et al. ,2021 ) used in CV, where they encode pixels of image from row and column sequentially, and leverage CNN on top of them. But the position embeddings, which are important for IE tasks, are not used in the Twin Transformer. Besides, we want to point out that the usage of plus-shaped attention and CNN originates from the reformulation of IE tasks, any other modules which can directly enable interaction between constituent spans of a relation and between adjacent token pairs should be beneficial.'},\n",
       " {'paper_id': '6350bc6790e50fcafdeceb9f',\n",
       "  'paper_title': 'Improving Aspect Sentiment Quad Prediction Via Template-Order Data Augmentation',\n",
       "  'abstract': 'Abstract\\n\\nRecently, aspect sentiment quad prediction (ASQP) has become a popular task in the field of aspect-level sentiment analysis. Previous work utilizes a predefined template to paraphrase the original sentence into a structure target sequence, which can be easily decoded as quadruplets of the form ( aspect category ,aspect term ,opinion term ,sentiment polarity ). The template involves the four elements in a fixed order. However, we observe that this solution contradicts with the order-free property of the ASQP task, since there is no need to fix the template order as long as the quadruplet is extracted correctly. Inspired by the observation, we study the effects of template orders and find that some orders help the generative model achieve better performance. It is hypothesized that different orders provide various views of the quadruplet. Therefore, we propose a simple but effective method to identify the most proper orders, and further combine multiple proper templates as data augmentation to improve the ASQP task. Specifically, we use the pre-trained language model to select the orders with minimal entropy. By finetuning the pre-trained language model with these template orders, our approach improves the performance of quad prediction, and outperforms state-of-the-art methods significantly in low-resource settings 1 .\\n\\n# 1 Introduction\\nThe aspect sentiment quad prediction (ASQP) task, aiming to extract aspect quadruplets from a review sentence, becomes popular recently ( Zhang et al. ,2021a ;Cai et al. ,2021 ). The quadruplet consists of four sentiment elements: 1) aspect category (ac) indicating the aspect class; 2) aspect term (at) which is the specific aspect description; 3) opinion term (ot) which is the opinion expression towards the aspect; 4) sentiment polarity $(s p)$ denoting the sentiment class of the aspect. 
For example, the sentence “The service is good and the restaurant is clean.” contains two quadruplets ( service general ,service ,good ,positive ) and ( ambience general ,restaurant ,clean ,positive ).  \\n\\nFigure 1: An example sentence is paraphrased into a target sequence with a fixed-order template ( Zhang et al. ,2021a ). Our approach employs special markers to form free-order templates and generates multiple target sequences. $O_{i}$ is the $i$ -th order permutation of the four elements.   \\n\\n\\n<html><body><table><tr><td>Originalsentence</td><td>Therestaurantisclean.</td></tr><tr><td>Quadruplet (ac,at,ot,sp)</td><td>(ambiencegeneral,restaurant,clean,positive)</td></tr><tr><td>Semanticquadruplet (xacxatxoxsp）</td><td>(ambiencegeneral,restaurant,clean,great)</td></tr><tr><td>Fixed-ordertemplate</td><td>Xac is xsp because xat is xot</td></tr><tr><td>Targetsequence</td><td>ambiencegeneralisgreatbecauserestaurant is clean</td></tr><tr><td>Free-ordertemplate</td><td>O,([AC] xac,[AT] xa [OT] x [SP] x);i ∈ [1,24]</td></tr><tr><td>Multipletargetsequences</td><td>[AC] xac [AT] x [OT] x [SP] xsp [AT] x [AC] xac [OT] xr [SP] xsp</td></tr></table></body></html>  \\n\\nTo extract aspect sentiment quadruplets, Zhang et al. (2021a ) propose a new paradigm which transforms the quadruplet extraction into paraphrase generation problem. With pre-defined rules, they first map the four elements of $(a c,a t,o t,s p)$ into semantic values $(x_{a c},\\\\,x_{a t},\\\\,x_{o t},\\\\,x_{s p})$ , which are then fed into a template to obtain a nature language target sequence. As shown in Figure 1 , the original sentence is “re-writen” into a target sequence by paraphrasing. After fine-tuning the pre-trained language model ( Raffel et al. ,2020 ) in such a sequence-to-sequence learning manner, the quadruplets can be disentangled from the target sequence.  \\n\\nThough promising is this paradigm, one issue is that the decoder of the generative pre-trained language model ( Raffel et al. 
,2020 ) is unidirectional (Vinyals et al. ,2015 ), which outputs the target sequence from the beginning of the sequence to its end. Thus four elements of a quadruplet are modeled in a fixed order $\\\\{x_{a c}\\\\rightarrow x_{s p}\\\\rightarrow x_{a t}\\\\rightarrow x_{o t}\\\\}$ . Yet ASQP is not a typical generation task. There is no need to fix the element order of the quadruplet as long as it can be extracted accurately. Aspect sentiment quadruplet has the order-free property, suggesting that various orders, e.g., $\\\\{x_{a c}\\\\to x_{s p}\\\\to x_{a t}\\\\to x_{o t}\\\\}$ and $\\\\{x_{a t}\\\\rightarrow x_{a c}\\\\rightarrow x_{o t}\\\\rightarrow x_{s p}\\\\}$ , are all correct.  \\n\\nIn light of this observation, our curiosity is triggered: Does the order of the four elements impact the generative pre-trained language models’ performances? Thus we conduct a pilot experiment. The four elements are concatenated with commas, thus we could switch their orders in a flexible manner and obtain order permutations. It is found that some template orders can help the generative model perform better. Even only concatenating with commas, some orders outperform the state-of-the-art.  \\n\\nIt is hypothesized that different orders provide various views of the quadruplet. Therefore, we propose a simple but effective method to identify the most proper orders, and further combine multiple proper templates as data augmentation to improve the ASQP task. Concretely, we use the pre-trained language model ( Raffel et al. ,2020 ) to select the orders with minimal entropy. Such template orders can better promote the potential of the pre-trained language model. To jointly fine-tune these template orders together, inspired by Paolini et al. (2021 ), we design special markers for the four elements, respectively. The markers help to disentangle quadruplets by recognizing both the types and their values of the four elements from the target sequence. 
In this way, the template orders do not need to be fixed in advance.  \\n\\nIn summary, the contributions of this work are three-fold:  \\n\\n•We study the effects of template orders in the ASQP task, showing that some orders perform better. To the best of our knowledge, this work is the first attempt to investigate ASQP from the template order perspective.  \\n\\n•We propose to select proper template orders by minimal entropy computed with pretrained language models. The selected orders are roughly consistent with their ground-truth performances.  \\n\\n•Based on the order-free property of the quadruplet, we further combine multiple proper templates as data augmentation to improve the ASQP task. Experimental results demonstrate that our approach outperforms state-of-the-art methods and has significant gains in low-resource settings.',\n",
       "  'introduction': 'Introduction\\n\\nThe aspect sentiment quad prediction (ASQP) task, aiming to extract aspect quadruplets from a review sentence, becomes popular recently ( Zhang et al. ,2021a ;Cai et al. ,2021 ). The quadruplet consists of four sentiment elements: 1) aspect category (ac) indicating the aspect class; 2) aspect term (at) which is the specific aspect description; 3) opinion term (ot) which is the opinion expression towards the aspect; 4) sentiment polarity $(s p)$ denoting the sentiment class of the aspect. For example, the sentence “The service is good and the restaurant is clean.” contains two quadruplets ( service general ,service ,good ,positive ) and ( ambience general ,restaurant ,clean ,positive ).  \\n\\nFigure 1: An example sentence is paraphrased into a target sequence with a fixed-order template ( Zhang et al. ,2021a ). Our approach employs special markers to form free-order templates and generates multiple target sequences. $O_{i}$ is the $i$ -th order permutation of the four elements.   \\n\\n\\n<html><body><table><tr><td>Originalsentence</td><td>Therestaurantisclean.</td></tr><tr><td>Quadruplet (ac,at,ot,sp)</td><td>(ambiencegeneral,restaurant,clean,positive)</td></tr><tr><td>Semanticquadruplet (xacxatxoxsp）</td><td>(ambiencegeneral,restaurant,clean,great)</td></tr><tr><td>Fixed-ordertemplate</td><td>Xac is xsp because xat is xot</td></tr><tr><td>Targetsequence</td><td>ambiencegeneralisgreatbecauserestaurant is clean</td></tr><tr><td>Free-ordertemplate</td><td>O,([AC] xac,[AT] xa [OT] x [SP] x);i ∈ [1,24]</td></tr><tr><td>Multipletargetsequences</td><td>[AC] xac [AT] x [OT] x [SP] xsp [AT] x [AC] xac [OT] xr [SP] xsp</td></tr></table></body></html>  \\n\\nTo extract aspect sentiment quadruplets, Zhang et al. (2021a ) propose a new paradigm which transforms the quadruplet extraction into paraphrase generation problem. 
With pre-defined rules, they first map the four elements of $(a c,a t,o t,s p)$ into semantic values $(x_{a c},\\\\,x_{a t},\\\\,x_{o t},\\\\,x_{s p})$ , which are then fed into a template to obtain a natural language target sequence. As shown in Figure 1 , the original sentence is “re-writen” into a target sequence by paraphrasing. After fine-tuning the pre-trained language model ( Raffel et al. ,2020 ) in such a sequence-to-sequence learning manner, the quadruplets can be disentangled from the target sequence.  \\n\\nThough promising is this paradigm, one issue is that the decoder of the generative pre-trained language model ( Raffel et al. ,2020 ) is unidirectional (Vinyals et al. ,2015 ), which outputs the target sequence from the beginning of the sequence to its end. Thus four elements of a quadruplet are modeled in a fixed order $\\\\{x_{a c}\\\\rightarrow x_{s p}\\\\rightarrow x_{a t}\\\\rightarrow x_{o t}\\\\}$ . Yet ASQP is not a typical generation task. There is no need to fix the element order of the quadruplet as long as it can be extracted accurately. Aspect sentiment quadruplet has the order-free property, suggesting that various orders, e.g., $\\\\{x_{a c}\\\\to x_{s p}\\\\to x_{a t}\\\\to x_{o t}\\\\}$ and $\\\\{x_{a t}\\\\rightarrow x_{a c}\\\\rightarrow x_{o t}\\\\rightarrow x_{s p}\\\\}$ , are all correct.  \\n\\nIn light of this observation, our curiosity is triggered: Does the order of the four elements impact the generative pre-trained language models’ performances? Thus we conduct a pilot experiment. The four elements are concatenated with commas, thus we could switch their orders in a flexible manner and obtain order permutations. It is found that some template orders can help the generative model perform better. Even only concatenating with commas, some orders outperform the state-of-the-art.  \\n\\nIt is hypothesized that different orders provide various views of the quadruplet. 
Therefore, we propose a simple but effective method to identify the most proper orders, and further combine multiple proper templates as data augmentation to improve the ASQP task. Concretely, we use the pre-trained language model ( Raffel et al. ,2020 ) to select the orders with minimal entropy. Such template orders can better promote the potential of the pre-trained language model. To jointly fine-tune these template orders together, inspired by Paolini et al. (2021 ), we design special markers for the four elements, respectively. The markers help to disentangle quadruplets by recognizing both the types and their values of the four elements from the target sequence. In this way, the template orders do not need to be fixed in advance.  \\n\\nIn summary, the contributions of this work are three-fold:  \\n\\n•We study the effects of template orders in the ASQP task, showing that some orders perform better. To the best of our knowledge, this work is the first attempt to investigate ASQP from the template order perspective.  \\n\\n•We propose to select proper template orders by minimal entropy computed with pretrained language models. The selected orders are roughly consistent with their ground-truth performances.  \\n\\n•Based on the order-free property of the quadruplet, we further combine multiple proper templates as data augmentation to improve the ASQP task. Experimental results demonstrate that our approach outperforms state-of-the-art methods and has significant gains in low-resource settings.',\n",
       "  'related_works': 'Related Works\\n\\n\\n# 6.1 Aspect-Level Sentiment Analysis\\nAspect-level sentiment analysis presents a research trend that deals with four elements gradually in a finer-grained manner ( Zhang et al. ,2022 ). Analyzing sentiment at the aspect level begins from learning the elements separately ( Pontiki et al. ,2014 ). To name a few, some works have been proposed to classify sentiment polarity given the mentioned aspect, either aspect category ( Hu et al. ,2019 ) or aspect term ( Zhang and Qian ,2020 ). Other works extract aspect term ( Ma et al. ,2019 ), classify aspect category ( Bu et al. ,2021 ). The four elements are not solely existing, which actually have strong connections with each other. Therefore, researchers focus on learning them jointly, such as aspect sentiment pair ( Zhao et al. ,2020 ;Cai et al. ,2020 ) or triplet ( Chen and Qian ,2020 ;Mao et al. ,2021 ).  \\n\\nRecently, learning four elements simultaneously sparks new research interests. Two promising directions have been pointed out by researchers. Cai et al. (2021 ) propose a two-stage method by extracting the aspect term and opinion term first. Then these items are utilized to classify aspect category and sentiment polarity. Another method is based on generation model ( Zhang et al. ,2021a ). By paraphrasing the input sentence, the quadruplet can be extracted in an end-to-end manner. In this work, we follow the generative direction and consider the order-free property of the quadruplet. To the best of our knowledge, this work is the first to study ASQP from the order perspective.\\n\\n# 6.2 Data Augmentation\\nData augmentation has been widely adopted in both the language and vision fields. We formulate the input and output of a model as $X$ and $Y$ , respectively. Previous data augmentation can be divided into three types. The first type is augmenting the input $X$ . 
For example, image flipping, rotation and scaling all change $X$ to seek improvements ( Shorten and Khoshgoftaar ,2019 ). In the text tasks, back translation ( Sugiyama and Yoshinaga ,2019 ) can also generate pseudo pairs through augmenting $X$ .The main idea is that changing $X$ does not affects its ground-truth label $Y$ . Secondly, both $X$ and $Y$ are augmented. A promising work is mixup ( Zhang et al. ,2018 ), which constructs virtual training examples base on the prior knowledge that linear interpolations of feature vectors should lead to linear interpolations of the associated targets. Despite it is intuitive, it has shown effectiveness in many tasks ( Sun et al. ,2020 ).  \\n\\nThe third one is augmenting $Y$ . One recent work proposes virtual sequence as the target-side data augmentation ( Xie et al. ,2022 ) for sequence-tosequence learning. It deals with typical generation tasks, which are closely connected with the order of words. Different from it, we exploit the characteristic of the generative ASQP task. Order permutations still provide ground-truth labels. Then we think that different orders are just similar to seeing a picture from different perspectives, i.e. different views. Therefore, combining multiple template orders can prevent the model from being biased to superficial patterns, and help it to comprehensively understand the essence of the task.'},\n",
       " {'paper_id': '64e5849c3fda6d7f063af43e',\n",
       "  'paper_title': 'Aspect-oriented Opinion Alignment Network for Aspect-Based Sentiment Classification',\n",
       "  'abstract': 'Abstract\\n understanding representations to capture comprehensive opinion information. In the last two sentences with different aspects, our AOAN accurately predicted the sentiment, showcasing its ability to align opinion words with their corresponding aspect and address the problem of semantic mismatch.\\n\\n# 5 Conclusions\\nIn this paper, we propose an AOAN to address the semantic mismatch problem in ABSC task. Specifically, we first design a neighboring span enhanced module, which highlights a variety of neighboring words compositions with respect to aspect for better capturing the accurate relevant opinion words. Then, we design a multi-perspective attention module to utilize ',\n",
       "  'introduction': 'Introduction\\n\\nThe main purpose of aspect-based sentiment classification (ABSC) is to judge the sentiment polarity (positive, negative, neutral) [1, 2] of aspect words in sentences expressing opinions. ABSC is an entitylevel oriented and fine-grained challenge for sentiment analysis. To illustrate, consider the following sample sentence taken from the SemEval 2014 restaurant dataset:  \\n\\nFood is very good, though I occasionally wondered about freshness of raw vegetables in side orders .  \\n\\nIn this sentence, the aspect are \" food \" and \" raw vegetables \", and the expected sentiment polarities of these aspects are intended to be positive and negative. Identifying the sentiment polarity of aspect is crucial for applications [3, 4] such as product review analysis, where understanding customers’ opinions on specific aspects of a product can provide valuable insights for businesses.  \\n\\nTo solve the aspect-based sentiment classification (ABSC) task, it is crucial to establish the semantic relationship between an aspect and its corresponding opinion words. Various recurrent neural networks (RNNs) [5, 6] have been proposed to learn representations directly from the left or/and right context with regard to the given aspect. However, prior studies have faced difficulties in accurately establishing semantic associations between aspect and long-distance context on sequential modeling. Therefore, attention mechanism have been widely adopted in ABSC tasks to model the correlations between aspect and context [7, 8, 9]. Unlike RNN-based models, attention mechanisms possess global modeling capability, which allows them to capture long-distance dependencies between aspect and context. However, attention mechanisms may not be effective when dealing with sentences containing multiple aspects. For instance, the given aspect \" raw vegetables \" may be associated with the opinions words \"very good \" and \" though \" simultaneously. 
In such cases, attention mechanisms may struggle to align opinion words with their corresponding aspect, resulting in semantic mismatch [8].  \\n\\n  \\nFigure 1. An example sentence contains two aspects but with opposite sentiment polarities from the restaurant reviews.  \\n\\nTo address this issue, various works introduce position information (e.g. fixed size window) [10, 11] and proximity strategy (e.g. position weight decay) [12, 13], which have proved that context words in closer proximity are more likely to be the actual opinion words of the aspect. However, these approaches may not encompass or emphasize all relevant opinion words, limiting the model’s ability to fully comprehend the contextual meaning. As shown in Figure 1, the aspect \"food \" has the opinion words \" very good \" that are relatively close to the aspect and easy to capture its sentiment by the aforementioned methods. However, accurately calculating the sentiment of the aspect \"raw vegetables \" requires the capture of a more comprehensive set of semantic information, including the opinion word \" though \", which is much farther away from the aspect. Thus, the way prior works take position information into consideration may fall short in scenarios where the opinion words are distant from the aspect or convey complex semantic information. Therefore, the challenge remains on how to utilize attention mechanisms to accurately capture and match appropriate opinion words with respect to the given aspect .  \\n\\nDrawing on the insights from recent studies on the critical role of different semantic compositionalities which can improve expressive ability and provide syntactic structure for natural language understanding [14], as well as the demonstrated effectiveness of spanlevel information in aspect-based sentiment classification (ABSC) [15, 16], we present a novel Aspect-oriented Opinion Alignment Network (AOAN) for aspect-based sentiment classification. 
Our proposed model addresses the issue of semantic mismatch by introducing a neighboring span enhanced module to highlights a variety of neighboring words compositions with respect to aspect. These compositions are used to emphasize different ranges of aspect neighboring words, providing flexibility to the contextual association between the aspect and neighboring words. To capture more comprehensive relevant opinion words based on different compositions of neighboring words, we then propose a multi-perspective attention module that utilizes abstract understanding representations to model multiperspective sentiment representations of each aspect. This parallel attention mechanism improves the accuracy and comprehensiveness of capturing the relevant opinion words regarding the given aspect. Finally, the multi-perspective sentiment representations are combined by a global average pooling layer, which aggregates the information from all neighboring spans, providing a comprehensive representation of the overall sentiment expressed by the given aspect. The main contributions can be summarized as follows:  \\n\\n•We propose an Aspect-oriented Opinion Alignment Network (AOAN), a novel framework designed to mitigate the semantic mismatch problem. This method is capable of exploiting contextual association of different neighboring spans and guarantees proper alignment between opinion words and the given aspect. •Our propose AOAN employs a neighboring span enhanced module that highlights various compositions of neighboring words and given aspects, enabling the capture of more evident information related to a given aspect. Additionally, a multi-perspective attention module is designed to align comprehensive opinion information with the given aspect in a parallel way. •We conduct extensive experiments on three benchmark datasets to evaluate the effectiveness of our approach. 
Experimental results demonstrate that our model outperforms the state-of-the-art methods, which confirms the efficacy of our proposed approach.',\n",
       "  'related_works': 'Related Works\\n\\nAspect-based sentiment classification (ABSC) is a fine-grained sentiment analysis task that focuses on extracting sentiment polarity towards a specific aspect within a given context. Early ABSC methods [17, 18] relied on handcrafted features and failed to capture the intrinsic semantic associations between the given aspect and context.  \\n\\nRecently, various neural network-based approaches, such as Convolutional Neural Networks (CNNs) [13, 19], Recurrent Neural Networks (RNNs) [6, 20], and Memory Networks [7], have been proposed to model the semantic relation between the aspect and context in an implicit way. For instance, Tang et al. [6] introduced two LSTM-based models, namely TD-LSTM and TC-LSTM, which segmented the sentence into three parts: the preceding context words, the aspect, and the following context words. However, RNN-based and its variants methods face challenges in capturing long-distance contextual sentiment features when the aspect is far away from the opinion words, due to the limitation of sequential modeling.  \\n\\nWith this in mind, researchers have deployed attention mechanisms for the aspect-based sentiment classification (ABSC) task to capture long-distance semantic features through global modeling. Tang et al. [7] proposed a Deep Memory Network (MemNet) that utilizes an attention mechanism to explicitly capture the relevance of each contextual word with respect to the aspect and infer the sentiment polarity. However, the inherent defects of attention mechanisms cannot differentiate the correlations of contextual words with respect to the given aspect, leading to a semantic mismatch problem.  \\n\\nTo tackle the aforementioned issue, some works improved attention mechanisms by modeling a global perspective of sentence-level information [8] or interaction between aspect and context [21, 22]. Liu et al. 
[8] proposed a content attention-based aspect-based sentiment classification model (Cabasc) that captures crucial information about given aspects from a global perspective. Ma et al. [21] introduced an interactive attention network (IAN) that generates the representations of target and context interactively. Huang et al. [22] proposed an attention over attention networks (AOA) that learns attentions from both aspect-to-text and text-to-aspect, suggesting that opinion words are highly correlated with the aspect. However, they neglected the fact that the position information is also crucial for identifying the sentiment of the aspect.  \\n\\nTaking this into consideration, some researchers have introduced various position information and proximity strategies to improve the effectiveness of aspect-based sentiment classification (ABSC) models. Gu et al. [10] proposed a position-aware bidirectional attention network (PBAN) that gives more attention to neighboring words of the aspect than words with long distances, while Zhou et al. [11] proposed a position-aware hierarchical transfer (PAHT) model that utilizes position information from multiple levels. Chen et al. [12] adopted a proximity strategy that assumes a closer opinion word is more likely to be the actual modifier of the target and designed a recurrent attention network (RAM) to counter irrelevant information using weight decay mechanisms. However, these approaches may not encompass or emphasize all relevant opinion words, limiting the model’s ability to fully comprehend the contextual meaning.  \\n\\nAnother trend of research has explored the use of graph neural networks (GNNs) for modeling syntactic structures of sentence based on dependency trees. For instance, Zhang et al. [23] introduced aspectspecific graph convolutional networks (ASGCN) to handle aspectlevel sentiment classification tasks. Tian et al. 
[24] proposed a typeaware graph convolutional network (T-GCN) that utilizes an attentive layer ensemble to learn contextual information from different GCN layers. Li et al. [25] proposed a dual graph convolutional network (DualGCN) that simultaneously took syntax structures and semantic correlations into consideration. Although syntactic-based methods have achieved promising results, the imperfect parsing performance and randomness of input sentences inevitably introduce noise through the dependency tree.  \\n\\nPrior works [10, 12, 13] which take position information into consideration may fall short in scenarios where the opinion words are distant from the aspect or convey complex semantic information. In this paper, we do not rely on syntactic information and focus on the different compositions of aspect neighboring words, which provide comprehensive insights into the sentiment expressed.\\n\\n# 3 Methodology\\n\\n# 3.1 Overview\\nWe describe our model AOAN is this section, which has two main modules shown in Figure 2: a neighboring span enhanced module and a multi-perspective attention module. The neighboring span enhanced module highlights different compositions of neighboring words through multiple neighboring spans. The multi-perspective attention module captures relevant opinion words regarding the given aspect via multi-perspective sentiment representations. We will discuss each component in detail in the following sub-sections.  \\n\\n  \\nFigure 2. The overall architecture of AOAN, which is composed primarily of a neighboring span enhanced module highlights neighboring word spans of varying ranges, while a multi-perspective attention module captures relevant opinion words with a comprehensive view.\\n\\n# 3.2 Task Definition\\nThe aim of our model is to predict the sentiment polarity of a given sentence towards a given aspect, based on the contextual information in the sentence. 
Specifically, let $S=\\\\{w_{1},\\\\ldots,w_{n}\\\\}$ represent a sentence comprising $n$ words, and let $A=\\\\{w_{a+1},\\\\dots,w_{a+m}\\\\}$ denote the aspect mentioned in the sentence, consisting of mwords. Our goal is to accurately predict the sentiment polarity of the sentence $S$ towards the aspect $A$ from the set { positive ,neutral ,negative }.'},\n",
       " {'paper_id': '63350ce690e50fcafd350b97',\n",
       "  'paper_title': 'METS-CoV: A Dataset of Medical Entity and Targeted Sentiment on COVID-19 Related Tweets',\n",
       "  'abstract': 'Abstract\\n and introduction accurately reflect the paper’s contributions and scope? [Yes]   \\n(b) Did you describe the limitations of your work? [Yes] See Section 6   \\n(c) Did you discuss any potential negative societal impacts of your work? [Yes] See Section 5   \\n(d) Have you read the ethics review guidelines and ensured that your paper conforms to them? [Yes]  \\n\\n2. If you are including theoretical results...  \\n\\n(a) Did you state the full set of assumptions of all theoretical results? [N/A] (b) Did you include complete proofs of all theoretical results? [N/A]  \\n\\n3. If you ran experiments (e.g. for benchmarks)...  \\n\\n(a) Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? [Yes] All the datasets, benchmarks and code are available at https://github.com/YLab-Open/METS-CoV   \\n(b) Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? [Yes] Those details were listed in Section 4. For the hyperparameter selecting, we used the default hyperparameters of the benchmark models.   \\n(c) Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? [Yes] Yes, we reported the results based on experiments on 5 different random seeds. Mean $\\\\pm$ std were reported in this paper.   \\n(d) Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? [No] We didn’t include the consumption of resources as we are releasing a new dataset rather than proposing new architecture.  \\n\\n4. If you are using existing assets (e.g., code, data, models) or curating/releasing new assets...  \\n\\n(a) If your work uses existing assets, did you cite the creators? [Yes] We used code from several models in our benchmarks, all the sources were properly cited in this paper.   
\\n(b) Did you mention the license of the assets? [No] The code we used are all open available, they were used to evaluate model performance in our new dataset. We do not claim any copyright from the code.   \\n(c) Did you include any new assets either in the supplemental material or as a URL? [No]   \\n(d) Did you discuss whether and how consent was obtained from people whose data you’re using/curating? [Yes] See Section 5. This work was conducted on public available data, so this study is waived from the participant’s consent. We follow the privacy policy of Twitter platform when sharing this dataset.  \\n\\n(e) Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? [Yes] See Section 5  \\n\\n5. If you used crowdsourcing or conducted research with human subjects...  \\n\\n(a) Did you include the full text of instructions given to participants and screenshots, if applicable? [N/A] This work was conducted on public available data, it doesn’t have participants.   \\n(b) Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? [N/A] This dataset is based on public available tweet text, it doesn’t have potential participant risks.   \\n(c) Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? [No] This dataset was voluntarily annotated by the authors and members of Prof. Jie Yang’s group.',\n",
       "  'introduction': 'Introduction\\n accurately reflect the paper’s contributions and scope? [Yes]   \\n(b) Did you describe the limitations of your work? [Yes] See Section 6   \\n(c) Did you discuss any potential negative societal impacts of your work? [Yes] See Section 5   \\n(d) Have you read the ethics review guidelines and ensured that your paper conforms to them? [Yes]  \\n\\n2. If you are including theoretical results...  \\n\\n(a) Did you state the full set of assumptions of all theoretical results? [N/A] (b) Did you include complete proofs of all theoretical results? [N/A]  \\n\\n3. If you ran experiments (e.g. for benchmarks)...  \\n\\n(a) Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? [Yes] All the datasets, benchmarks and code are available at https://github.com/YLab-Open/METS-CoV   \\n(b) Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? [Yes] Those details were listed in Section 4. For the hyperparameter selecting, we used the default hyperparameters of the benchmark models.   \\n(c) Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? [Yes] Yes, we reported the results based on experiments on 5 different random seeds. Mean $\\\\pm$ std were reported in this paper.   \\n(d) Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? [No] We didn’t include the consumption of resources as we are releasing a new dataset rather than proposing new architecture.  \\n\\n4. If you are using existing assets (e.g., code, data, models) or curating/releasing new assets...  \\n\\n(a) If your work uses existing assets, did you cite the creators? [Yes] We used code from several models in our benchmarks, all the sources were properly cited in this paper.   
\\n(b) Did you mention the license of the assets? [No] The code we used are all open available, they were used to evaluate model performance in our new dataset. We do not claim any copyright from the code.   \\n(c) Did you include any new assets either in the supplemental material or as a URL? [No]   \\n(d) Did you discuss whether and how consent was obtained from people whose data you’re using/curating? [Yes] See Section 5. This work was conducted on public available data, so this study is waived from the participant’s consent. We follow the privacy policy of Twitter platform when sharing this dataset.  \\n\\n(e) Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? [Yes] See Section 5  \\n\\n5. If you used crowdsourcing or conducted research with human subjects...  \\n\\n(a) Did you include the full text of instructions given to participants and screenshots, if applicable? [N/A] This work was conducted on public available data, it doesn’t have participants.   \\n(b) Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? [N/A] This dataset is based on public available tweet text, it doesn’t have potential participant risks.   \\n(c) Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? [No] This dataset was voluntarily annotated by the authors and members of Prof. Jie Yang’s group.',\n",
       "  'related_works': 'Related Works\\n\\nMETS-CoV supports two basic NLP tasks from a medical perspective: 1) named entity recognition, i.e., identifying general and medical entities, and 2) targeted sentiment analysis, i.e., predicting the attitudes of Twitter users towards specific entities (including Drug and Vaccine ). This section reviews several commonly used open-source datasets for these two tasks and compares them with the proposed dataset.\\n\\n# 2.1 Named Entity Recognition Datasets\\nCoNLL 2003 (Tjong Kim Sang and De Meulder, 2003) is one of the most widely used NER datasets with its newswire texts collected from the Reuters Corpus. The dataset consists of 4 general entity types: PER (Person), LOC (Location), ORG (Organization), and MISC (Miscellaneous), which are also adopted in the SciTech News dataset (Jia et al ., 2019). WNUT NER (Strauss et al ., 2016) is a benchmark NER dataset for the social media domain that consists of manually annotated tweets with 10 entity types. Nevertheless, none of the entity types is medical-related. Similarly, the recently release Tweebank-NER dataset is neither medical-related (Jiang et al ., 2022). In the medical domain, NER is often used to extract medical terminologies from clinical case reports (CCRs) or electronic medical records (EMRs). A representative medical NER dataset is i2b2-2010 dataset (Uzuner et al .,2011), which includes discharge summaries and progress notes provided by well-known medical centers with 3 entity types: test, problem, and treatment. Besides, one of the SMM4H shared tasks (Klein et al ., 2020; Weissenbacher et al ., 2019) released a dataset for extracting tweet text spans with adverse drug reactions (ADR). This dataset is not COVID-19-related. On the other hand, the CORD-NER dataset (Wang et al ., 2020b) has 75 fine-grained types of entities from scientific papers about COVID-19 and historical coronavirus research. 
But since social media texts have way more colloquial forms than scientific papers, models trained on WNUT or CORD-NER are unsuitable for social media analyses.\\n\\n# 2.2 Targeted Sentiment Analysis Datasets\\nMost TSA studies typically experiment on 3 datasets: LAPTOP (Pontiki et al ., 2014), TWITTER (Mitchell et al ., 2013), and REST (Pontiki et al ., 2015, 2016). Specifically, LAPTOP and REST are user review datasets collected from the laptop and restaurant domains. The TWITTER dataset has tweets but only with general types of entities ( Person and Organization ). At the same time, the data might be outdated for the ever-involving social media languages.  \\n\\nThere are several recent open-domain TSA datasets. For example, YASO (Orbach et al ., 2021) is an open-domain TSA dataset containing more than 2,000 English user comments extracted from YELP 2 , AMAZON (Keung et al ., 2020), SST (Socher et al ., 2013), and OPINOSIS (Ganesan et al .,2010), covering a variety of topics in multiple domains. COVIDSenti (Naseem et al ., 2021) includes 90,000 COVID-19-related tweets annotated with overall sentiment polarity.  \\n\\nDespite the existing open-domain and in-domain datasets, NER and TSA on clinical social media texts remain an under-explored area. There is a pressing need for such datasets to facilitate social mediabased public health studies. 
To fill in this gap, we release METS-CoV, a COVID-19 tweets-based NER and TSA dataset with 3 general entity labels ( Person ,Location ,Organization ,) 4 medical entity labels ( Disease ,Drug ,Symptom , and Vaccine ) as well as sentiment labels for Person ,Organization ,Drug , and Vaccine entities.\\n\\n# 3 METS-CoV\\nIn this section, we provide a detailed description of the collection methodology, annotation process, and statistics of the NER and TSA subsets of METS-CoV.\\n\\n# 3.1 Data Collection Methodology\\nWe collect COVID-19 related tweets ranging from February 1, 2020, to September 30, 2021, whose unique Tweet Identifier (Tweet ID) came from an open-source database (Chen et al ., 2020a). All the tweets are downloaded following Twitter’s automation rules and data security policy. For data filtering, we first remove non-English tweets and retweets, resulting in 368,816,761 tweets. Then we filter out the tweets containing URLs because they are often restatements of third-party messages and do not directly reflect the users’ intentions and attitudes. Finally, we use a list of symptoms (including symptoms of COVID-19 as well as common diseases) as keywords to match the tweets to extract medical-related tweets (Wang et al ., 2020a; Goss et al ., 2018; Sarker et al ., 2020; Lopez-Leon et al .,2021; Mao et al., 2020). 2,208,676 tweets remain after the pre-processing step.\\n\\n# 3.2 Data Annotation Process\\nWe define 7 entity types based on public health research needs (Tsao et al ., 2021; Xu et al ., 2022), including 3 general entity types and 4 medical entity types for annotation. In particular, we select 4 entity types for additional sentiment annotation with 3 types of sentiment labels: positive, negative, and neutral. All the annotation work is done using the YEDDA annotation platform by Yang et al .(2018b). We first randomly sample 6,000 tweets from the pre-processed tweets for NER annotation. 
Then we use these 6,000 annotated NER data to train a BERT-based NER tagger and annotate the rest of the tweets. In order to include more medical entities in the dataset, we select additional 4,000 tweets from the model labeled data (with higher drug and vaccine entity ratios) and manually validate the entities to extend the dataset to a total number of 10,000 tweets.  \\n\\nHere we describe detailed annotation guidelines and processes for the METS-CoV-NER dataset and the METS-CoV-TSA dataset in detail. Note that all our annotators are from medical domains, including medicine, public health and pharmaceutical sciences.'},\n",
       " {'paper_id': '634e194090e50fcafd24e665',\n",
       "  'paper_title': 'StoryER: Automatic Story Evaluation Via Ranking, Rating and Reasoning',\n",
       "  'abstract': 'Abstract\\n\\nExisting automatic story evaluation methods place a premium on story lexical level coherence, deviating from human preference. We go beyond this limitation by considering a novel Story E valuation method that mimics human preference when judging a story, namely StoryER , which consists of three subtasks: Ranking, Rating and Reasoning. Given either a machine-generated or a human-written story, StoryER requires the machine to output 1) a preference score that corresponds to human preference, 2) specific ratings and their corresponding confidences and 3) comments for various aspects (e.g., opening, charactershaping). To support these tasks, we introduce a well-annotated dataset comprising (i) $100\\\\mathrm{k}$ ranked story pairs; and (ii) a set of $46\\\\mathrm{k}$ ratings and comments on various aspects of the story. We finetune Longformer-EncoderDecoder (LED) on the collected dataset, with the encoder responsible for preference score and aspect prediction and the decoder for comment generation. Our comprehensive experiments result in a competitive benchmark for each task, showing the high correlation to human preference. In addition, we have witnessed the joint learning of the preference scores, the aspect ratings, and the comments brings gain in each single task. Our dataset and benchmarks are publicly available to advance the research of story evaluation tasks.\\n\\n# 1 Introduction\\nEven for humans, evaluating story quality is a challenging task. Although many literature criteria have been proposed, the most straightforward way is to count how many readers like the story which is referred as to human preference. Bearing it in mind, story writing community usually uses upvote count Figure 1: The existing story evaluation method (UNION) outputs a score for estimating the coherence of the stories, while human-written stories rarely suffer from this problem. 
Our model (Ours) which is trained by comparing two stories (Ranking), evaluates the story based on human preference (i.e., upvote counts), produces scores for various aspects (Rating), and leaves comments (Reasoning). Our model is applicable to both machine-generated and human-written stories.  \\n\\n<html><body><table><tr><td colspan=\"4\">forcenturiesgrantingyoueternallife.</td></tr><tr><td colspan=\"3\">HumanWrittenStory1 (upvote count:1.8k) Input:W...h...y?\" She gurgled out, spilling blood onto my lap. Looking into myeyeslike apetbeing euthanized,she knows what\\'sgoing onbut doesn\\'t know why. I stay silent,..(remaining 453 words)</td><td colspan=\"2\">HumanWrittenStory2 (upvote count:1) Input:Untilyoumeet theonesoulmate..one whomatters...theoneforwhomchangeyour ways...yourredemption...howeverit\\'salmost ironicthatyouneedtodieforthe opportunity...(remaining 391words)</td></tr><tr><td colspan=\"3\">(Output)Coherence score(UNiON):0.99</td><td colspan=\"2\">(Output)Coherence score(UNiON): ：0.99</td></tr><tr><td colspan=\"3\">(Output)Preferencescore(Ours): 0.81</td><td colspan=\"3\">(Output)Preference score(Ours): 0.23</td></tr><tr><td colspan=\"3\">comment</td><td></td><td colspan=\"2\"></td></tr><tr><td>aspect</td><td>rating</td><td>This opening is engaging,</td><td>aspect</td><td>rating</td><td>comment This opening isvague and</td></tr><tr><td>opening</td><td>0.91</td><td>especiallywhen talking ... At the end of the story, the</td><td></td><td>0.12</td><td>ends up making a low. ... Sadly this story lacks an</td></tr><tr><td>ending</td><td>0.73</td><td>author narrates...</td><td>ending</td><td>0.13</td><td>ending. It seems that the...</td></tr><tr><td>111</td><td></td><td>111</td><td></td><td></td><td></td></tr></table></body></html>  \\n\\nas a story quality criterion. As shown in Fig. 1 ,more readers like the left story (upvote count $=$ 1.8k) rather than the right one (upvote count $=1$ ).  
\\n\\nExisting methods which use referenced metrics (e.g., BLEU ( Papineni et al. ,2002 ), METEOR (Banerjee and Lavie ,2005 ), ROUGE ( Lin ,2004 )) and unreferenced metrics (e.g., UNION ( Guan and Huang ,2020 ), MANPLTS ( Ghazarian et al. ,2021 )), deviate from human preference (Fig. 1 ). On the contrary, we aim to explicitly evaluate a story, introducing a human preference-liked system consisting of three subtasks: Ranking, Rating and Reasoning.  \\n\\nWe build a model upon Longformer-EncoderDecoder (LED) ( Beltagy et al. ,2020 ), where the encoder predicts the preference score (Ranking), aspect ratings and confidences (Rating) while the decoder generates the comments (Reasoning). Inspired by widely-used pairwise comparison in story evaluation, we train our model with the ranking objectives. In this way, the score margin between good and poor stories are enlarged, resulting in high correlation between human preference and our predicted preference score (Fig. 1 ). We also witness that our performance is improved when we conduct joint training on three subtasks.  \\n\\n  \\nFigure 2: The Writing Prompt Dataset with metadata (left) contains prompt, story, upvotes, and comments from readers. Our dataset collection pipeline (right) shows the template for data collection. We ask the workers to select 3-5 aspects, score each aspect 1-5 from poor to good and leave the comments that shows the reason for the score they rated.  \\n\\nIn aid of the proposed task, we present a wellannotated crowd-sourcing dataset, consisting of two parts. (i) One is built from 63,929 stories and their corresponding upvote counts provided in WritingPrompt dataset (WP) ( Fan et al. ,2018 ) (Figure 2 (left)) by pairing one highly-upvoted story (upvotes $\\\\geq50\\\\$ ) and one lowly-upvoted story (upvotes $\\\\leq0$ under the same prompt. As a result, we obtain 100k pairs of stories, namely 100k story ranking data ,used to train and evaluate the preference score prediction. 
(ii) The other part is made up of 45,948 aspect comments and their respective rating scores (1-5) by Amazon Mechanical Turk (AMT) and augmented data (Section 3.2 ), namely 46k aspect rating and reasoning data , used for model explanation. Our contributions are three-fold:  \\n\\n•This study addresses a novel task StoryER, that consists of preference score prediction, aspect rating and comment generation.   \\n•We introduce a new dataset for StoryER task and create benchmarks to promote the story evaluation research.   \\n•Comprehensive experiments and intensive analysis indicate our preference score prediction outperforms previous metrics, and more accurately reflects human preference. Aspect rating and comment generation also helps in the evaluation and provide explanations. Moreover, we point out the remaining challenges under various scenarios in the hope that facilitates future research.',\n",
       "  'introduction': 'Introduction\\n\\nEven for humans, evaluating story quality is a challenging task. Although many literature criteria have been proposed, the most straightforward way is to count how many readers like the story which is referred as to human preference. Bearing it in mind, story writing community usually uses upvote count Figure 1: The existing story evaluation method (UNION) outputs a score for estimating the coherence of the stories, while human-written stories rarely suffer from this problem. Our model (Ours) which is trained by comparing two stories (Ranking), evaluates the story based on human preference (i.e., upvote counts), produces scores for various aspects (Rating), and leaves comments (Reasoning). Our model is applicable to both machine-generated and human-written stories.  \\n\\n<html><body><table><tr><td colspan=\"4\">forcenturiesgrantingyoueternallife.</td></tr><tr><td colspan=\"3\">HumanWrittenStory1 (upvote count:1.8k) Input:W...h...y?\" She gurgled out, spilling blood onto my lap. Looking into myeyeslike apetbeing euthanized,she knows what\\'sgoing onbut doesn\\'t know why. I stay silent,..(remaining 453 words)</td><td colspan=\"2\">HumanWrittenStory2 (upvote count:1) Input:Untilyoumeet theonesoulmate..one whomatters...theoneforwhomchangeyour ways...yourredemption...howeverit\\'salmost ironicthatyouneedtodieforthe opportunity...(remaining 391words)</td></tr><tr><td colspan=\"3\">(Output)Coherence score(UNiON):0.99</td><td colspan=\"2\">(Output)Coherence score(UNiON): ：0.99</td></tr><tr><td colspan=\"3\">(Output)Preferencescore(Ours): 0.81</td><td colspan=\"3\">(Output)Preference score(Ours): 0.23</td></tr><tr><td colspan=\"3\">comment</td><td></td><td colspan=\"2\"></td></tr><tr><td>aspect</td><td>rating</td><td>This opening is engaging,</td><td>aspect</td><td>rating</td><td>comment This opening isvague and</td></tr><tr><td>opening</td><td>0.91</td><td>especiallywhen talking ... 
At the end of the story, the</td><td></td><td>0.12</td><td>ends up making a low. ... Sadly this story lacks an</td></tr><tr><td>ending</td><td>0.73</td><td>author narrates...</td><td>ending</td><td>0.13</td><td>ending. It seems that the...</td></tr><tr><td>111</td><td></td><td>111</td><td></td><td></td><td></td></tr></table></body></html>  \\n\\nas a story quality criterion. As shown in Fig. 1 ,more readers like the left story (upvote count $=$ 1.8k) rather than the right one (upvote count $=1$ ).  \\n\\nExisting methods which use referenced metrics (e.g., BLEU ( Papineni et al. ,2002 ), METEOR (Banerjee and Lavie ,2005 ), ROUGE ( Lin ,2004 )) and unreferenced metrics (e.g., UNION ( Guan and Huang ,2020 ), MANPLTS ( Ghazarian et al. ,2021 )), deviate from human preference (Fig. 1 ). On the contrary, we aim to explicitly evaluate a story, introducing a human preference-liked system consisting of three subtasks: Ranking, Rating and Reasoning.  \\n\\nWe build a model upon Longformer-EncoderDecoder (LED) ( Beltagy et al. ,2020 ), where the encoder predicts the preference score (Ranking), aspect ratings and confidences (Rating) while the decoder generates the comments (Reasoning). Inspired by widely-used pairwise comparison in story evaluation, we train our model with the ranking objectives. In this way, the score margin between good and poor stories are enlarged, resulting in high correlation between human preference and our predicted preference score (Fig. 1 ). We also witness that our performance is improved when we conduct joint training on three subtasks.  \\n\\n  \\nFigure 2: The Writing Prompt Dataset with metadata (left) contains prompt, story, upvotes, and comments from readers. Our dataset collection pipeline (right) shows the template for data collection. We ask the workers to select 3-5 aspects, score each aspect 1-5 from poor to good and leave the comments that shows the reason for the score they rated.  
\\n\\nIn aid of the proposed task, we present a wellannotated crowd-sourcing dataset, consisting of two parts. (i) One is built from 63,929 stories and their corresponding upvote counts provided in WritingPrompt dataset (WP) ( Fan et al. ,2018 ) (Figure 2 (left)) by pairing one highly-upvoted story (upvotes $\\\\geq50\\\\$ ) and one lowly-upvoted story (upvotes $\\\\leq0$ under the same prompt. As a result, we obtain 100k pairs of stories, namely 100k story ranking data ,used to train and evaluate the preference score prediction. (ii) The other part is made up of 45,948 aspect comments and their respective rating scores (1-5) by Amazon Mechanical Turk (AMT) and augmented data (Section 3.2 ), namely 46k aspect rating and reasoning data , used for model explanation. Our contributions are three-fold:  \\n\\n•This study addresses a novel task StoryER, that consists of preference score prediction, aspect rating and comment generation.   \\n•We introduce a new dataset for StoryER task and create benchmarks to promote the story evaluation research.   \\n•Comprehensive experiments and intensive analysis indicate our preference score prediction outperforms previous metrics, and more accurately reflects human preference. Aspect rating and comment generation also helps in the evaluation and provide explanations. Moreover, we point out the remaining challenges under various scenarios in the hope that facilitates future research.',\n",
       "  'related_works': 'Related Works\\n\\nOverlap-based metrics such as BLEU ( Sulem et al. ,2018 ) and ROUGE ( Lin ,2004 ) calculate lexical matches (i.e., n-gram matching) and reward the words that resemble the reference in their surface form, even if they do not accurately capture meaning, and penalize other paraphrases. Recent research ( Edunov et al. ,2020 ) indicates that these metrics do not reflect human preferences, particularly for open-ended text generation tasks.  \\n\\nNeural-based metrics are motivated by the success of transformers as multitask learners ( Vaswani et al. ,2017 ), and adapt them for the task of neural language evaluation. When compared to overlapbased metrics, BERTScore ( Zhang et al. ,2019 ), MoverScore ( Zhao et al. ,2019 ), BLEURT ( Sellam et al. ,2020 ) report stronger correlations with human judgment. For specific use, in open dialogue generation, Adem ( Lowe et al. ,2017 ) captures semantic similarity beyond word overlap statistics, and exploits both the context and the reference response to calculate its score for the model response. RUBER ( Tao et al. ,2018 ) and its variant, RUBERBERT ( Ghazarian et al. ,2019 ) evaluates a reply by taking into consideration both a ground-truth reply and a query without requiring labels of human satisfaction and can be extended to different datasets and languages.  \\n\\nNeural discriminator is proposed particularly for story evaluation. The metrics mentioned above show limited performance in story evaluation as demonstrated in Guan et al. (2021 ). UNION ( Guan and Huang ,2020 ) and MANPLTS ( Ghazarian et al. ,2021 ) analyze the problem from machinegenerated stories and generate negative data by heuristics and plot manipulation, and then distinTable 1: Data statistics of $100\\\\mathrm{k}$ story ranking data. #prompt denotes the number of unique prompts, $\\\\#S_{h i g h}$ and $\\\\#S_{l o w}$ denotes the number of highly-voted stories and lowly-voted stories. 
We also show the averaged word count in the parentheses. #pairs shows the number of ranked story pairs.  \\n\\n<html><body><table><tr><td></td><td>#prompt</td><td>#Shigh (word)</td><td>#Stow (word)</td><td>#pairs</td></tr><tr><td>Train</td><td>5892</td><td>10371 (491.01)</td><td>26246 (453.06)</td><td>66336</td></tr><tr><td>Val</td><td>2280</td><td>3816 (473.27)</td><td>11458 (446.40)</td><td>27748</td></tr><tr><td>Test</td><td>2280</td><td>3906 (488.32)</td><td>8132 (454.87)</td><td>22887</td></tr></table></body></html>  \\n\\nguish by a BERT-based model ( Devlin et al. ,2019 ). The coherence score they produce can be expressed as the probability of the story being identified as human-written story. In this paper, we require our model to follow human preference, not only the coherence, which we believe is a more general way of story evaluation.\\n\\n# 3 Dataset\\nOur dataset comprises of two parts: 100k story ranking, and $46\\\\mathrm{k}$ aspect rating and reasoning.\\n\\n# 3.1 100k Story Ranking Data\\nAs we mentioned above, ranking method is more flexible and better than discrimination when evaluating the story (we also experimentally compare them in Sec. F.1 ). We thus prepare $100\\\\mathrm{k}$ pairwise ranking data for training the model. To this end, we first collect 193,842 stories prior to 03/2020 from $\\\\mathbf{WP}^{3}$ along with their prompt, the number of upvotes and uncategorized comments. We remove the stories updated from 12/2019 to 03/2020, since newly-updated stories usually have few upvotes regardless of whether they are good or bad. Then, we exclusively keep stories with the word count between 200 and 800. Finally, we pick two stories from the same prompt, one highly upvoted (i.e., votes upvotes stories and 116,971 story pairs. 
We split the story $\\\\leq0$ $\\\\geq50^{\\\\;4}$ ), resulting in a total of 63,929 unique ) and one lowly upvoted (i.e., uppairs based on the prompts into training, validation and testing (Table 1 ), to ensure that each division receives a unique set of prompts.\\n\\n# 3.2 46k Aspect Rating and Reasoning Data\\nApart from the preference score, we require our model to provide ratings and comments on predefined aspects to aid in the explanation of the predicted preference score.  \\n\\nAspect category extraction. To begin with, we must determine which aspects in the content should be measured. As some readers leave comments to explain why they upvote or downvote the stories, a straightforward way is to extract aspect categories based on those uncategorized comments. We therefore adopt latent Dirichlet allocation (LDA), which models the documents with a certain number of topics, based upon the co-occurrence of individual words. More precisely, we follow Brody and Elhadad (2010 ) to treat each comment as a separate document. LDA can produce a distribution of frequency of occurrence for each word in the topics. We optimize LDA through a cluster validation scheme, and obtain the optimal number of aspects 10. Based on the most representative words in each topic, we manually name each topic as the aspect category. These aspect categories are defined using some widely used aspects inspired from the websites.  \\n\\nComment and aspect collection. Comments in WP meta data are neither categorized with aspect categories, nor labeled with ratings, and some of them are totally irrelevant to the content. More importantly, there is a bias towards positive comments, which implies that not too many readers are willing to leave comments on poor stories. Therefore, we collect new comments via crowd-sourcing. By learning from these well-annotated comment data, we train neural models to filter out noisy data from comments in WP meta data. 
To collect the data, we ask workers from AMT to select aspects, rate sentiment and leave comments on 5,964 unique stories from WP. For increasing the diversity of comments, some stories are allocated to two different annotators, resulting in a total of 9,112 submissions (i.e., 1.53 annotations/story). As shown in Figure 2 (right), each story requires the annotators to rate (normalized to 0-1) and leave comments on 3 to 5 aspects that are most confident by the workers. The final statistics of the comments is listed in Table 2 .Comment augmentation. The noisy comments in WP meta data then can be classified and analyzed by two models: aspect category classification model and comment sentiment analysis model that  \\n\\n<html><body><table><tr><td></td><td>#comment</td><td>#comment*</td><td>rate (1-5)</td><td>rate* (1-5)</td><td>comment_len</td><td>comment_len*</td></tr><tr><td>Structure</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>opening/beginning</td><td>3615</td><td>5617</td><td>2.53</td><td>3.15</td><td>30.20</td><td>32.44</td></tr><tr><td>middle/twist/flow/conflict</td><td>3967</td><td>5971</td><td>2.24</td><td>2.78</td><td>30.59</td><td>31.63</td></tr><tr><td>ending</td><td>5610</td><td>7615</td><td>2.13</td><td>2.49</td><td>31.48</td><td>31.59</td></tr><tr><td>WritingStyle</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>character shaping</td><td>5101</td><td>7102</td><td>2.21</td><td>2.53</td><td>31.57</td><td>34.23</td></tr><tr><td>scene description</td><td>4168</td><td>6172</td><td>2.18</td><td>2.53</td><td>31.75</td><td>39.30</td></tr><tr><td>Type</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>heartwarming/touching (Romance)</td><td>426</td><td>1866</td><td>2.99</td><td>4.39</td><td>32.05</td><td>32.64</td></tr><tr><td>sad/crying/tragedy(Tragedy)</td><td>462</td><td>1680</td><td>3.12</td><td>3.93</td><td>30.85</td><td>34.67</td></tr><tr><td>horror/scary 
(Horror)</td><td>815</td><td>1985</td><td>2.49</td><td>3.61</td><td>30.92</td><td>33.24</td></tr><tr><td>funny/hilarious/laugh (Comedy)</td><td>1153</td><td>3156</td><td>3.25</td><td>3.96</td><td>30.04</td><td>30.91</td></tr><tr><td>novelty/goodidea/brilliant (Fiction)</td><td>2782</td><td>4784</td><td>2.49</td><td>3.26</td><td>32.51</td><td>32.70</td></tr><tr><td>Overall</td><td>28099</td><td>45948</td><td>2.56</td><td>3.26</td><td>31.20</td><td>33.33</td></tr></table></body></html>  \\n\\nTable 2: Data statistics in $46\\\\mathrm{k}$ aspect rating and reasoning data. \\\\* denotes the data statistics after data augmentation. We list the number of comments with rating scores (2nd and 3rd columns), averaged rating scores (4th and 5th columns) and averaged word count (6th and 7th columns).  \\n\\ntrained with our collected data. The training details can be found in the supplementary material. We filter out irrelevant comments by eliminating those with no values in aspect categories that exceeds 0.9 after softmax and retain the comments with the word count ranged from 15 to 50. The remaining comments are then rated by the their sentiments. Finally, we obtain 17,849 valuable comments for 6,705 additional unique stories and merge them into our collected data, resulting in a total number of 45,948 for comments and 12,669 for unique stories. We split the collected data into training, validation, and test data in the ratio of 8:1:1 and put the augmented data into the training data (Table 2 ).'},\n",
       " {'paper_id': '64ae66fc3fda6d7f0684b46f',\n",
       "  'paper_title': 'Cross-Domain Data Augmentation with Domain-Adaptive Language Modeling for Aspect-Based Sentiment Analysis.',\n",
       "  'abstract': 'Abstract\\n and introduction summarize the paper’s main claims? ',\n",
       "  'introduction': 'Introduction\\n summarize the paper’s main claims? Abstract and section ',\n",
       "  'related_works': 'Related Works\\n\\n\\n# 2.1 Aspect-Based Sentiment Analysis (ABSA)\\nAs an important task in sentiment analysis, ABSA has been extensively studied in the last decade. Earlier works mainly focus on two subtasks of ABSA, i.e., aspect extraction (AE) ( Liu et al. ,2015 ;Chen and Qian ,2020a ) and aspect-based sentiment classification (ASC) ( Zhang et al. ,2016 ;Chen et al. ,2017 ;Sun et al. ,2019 ;Wang et al. ,2020 ). Recently, many supervised methods are proposed to solve the two sub-tasks in an end-to-end manner, which either resort to multi-task learning to exploit the relations between AE and ASC ( Luo et al. ,2019 ;He et al. ,2019 ;Chen and Qian ,2020b ) or employ a collapsed tagging scheme to combine AE and ASC into a unified label space and formulate the task as a sequence labeling problem ( Wang et al. ,2018 ;Li et al. ,2019a ,b). Despite obtaining promising results on several benchmark datasets, these methods suffer from the lack of annotated data in many emerging domains. To alleviate this issue, we aim to propose an unsupervised domain adaptation method to generate sufficient labeled data for ABSA in any target domain.\\n\\n# 2.2 Unsupervised Domain Adaptation\\nIn the literature, a myriad of unsupervised domain adaptation methods have been proposed for coarsegrained sentiment analysis ( Zhuang et al. ,2020 ), including pivot-based methods ( Blitzer et al. ,2007 ;Yu and Jiang ,2016 ;Ziser and Reichart ,2018 ;Xi et al. ,2020 ), auto-encoders ( Glorot et al. ,2011 ;Zhou et al. ,2016 ), domain adversarial networks (Ganin and Lempitsky ,2015 ;Ganin et al. ,2016 ;Li et al. ,2018 ), and semi-supervised methods ( He et al. ,2018 ;Ye et al. ,2020 ). These methods primarily focus on learning domain-invariant representations to alleviate the distribution discrepancy across domains. 
Inspired by the success of these representation-based methods, a few recent studies have adapted them to the cross-domain ABSA task, in which the key idea is to learn a shared representation for each word or aspect term across domains ( Ding et al. ,2017 ;Wang and Pan ,2018 ,2019 ,2020 ;Li et al. ,2019c ;Zeng et al. ,2022 ;Chen and Qian ,2022 ). Moreover, Lekhtman et al. (2021 )proposed a customized pre-training approach with aspect category shift for the aspect extraction task.  \\n\\nDespite obtaining promising results, the major limitation of these aforementioned methods for cross-domain ABSA is that their models for the main ABSA task is solely trained on the sourcedomain labeled data. Thus, their models are insensitive to target-specific features. To address this issue, some studies have explored a Cross-Domain Data Augmentation framework (CDDA) to directly generate much target-domain labeled data, including MLM-based CDDA ( Yu et al. ,2021 ;Yang et al. ,2022 ) and Seq2Seq-based CDDA ( Chen et al. ,2021 ;Li et al. ,2022 ). However, the generated data by these methods has several limitations including 1) preserving many source-specific attributes such as syntactic structures; 2) lack of fluency and diversity. Thus, in this work, we aim to propose a new data augmentation framework that can generate fluent target-domain labeled data without any source-specific attributes.\\n\\n# 3 Methodology\\n\\n# 3.1 Problem Definition and Notations\\nFollowing previous studies ( Li et al. ,2019c ), we formulate ABSA and AE as a sequence labeling problem. 
Given a sentence with $n$ words $\\\\pmb{x}\\\\,=\\\\,\\\\{w_{1},w_{2},...,w_{n}\\\\}$ , the goal is to predict its corresponding label sequence $\\\\pmb{y}=\\\\{y_{1},y_{2},...,y_{n}\\\\}$ ,$y_{j}\\\\,\\\\in\\\\,\\\\bigl\\\\{{\\\\mathsf{B}}{\\\\mathsf{-}}{\\\\mathsf{P}}{\\\\mathsf{O}}{\\\\mathsf{S}},{\\\\mathsf{I}}{\\\\mathsf{-}}{\\\\mathsf{P}}{\\\\mathsf{O}}{\\\\mathsf{S}},{\\\\mathsf{B}}{\\\\mathsf{-}}{\\\\mathsf{N}}{\\\\mathsf{E}}{\\\\mathsf{G}},{\\\\mathsf{I}}{\\\\mathsf{-}}{\\\\mathsf{I}}$ EG ,B-NEU ,$\\\\mathsf{I}\\\\!-\\\\!\\\\mathsf{N E U},0\\\\!\\\\}$ }for ABSA and $y_{j}\\\\in\\\\{{\\\\mathsf{B}},{\\\\mathsf{I}},{\\\\mathsf{O}}\\\\}$ ∈{ }for AE.  \\n\\nIn this work, we focus on the unsupervised domain adaptation setting, in which the source domain has enough labeled data and the target domain only has unlabeled data. Let denote a set of source-domain labeled data, and $\\\\mathcal{D}^{S}=\\\\{(\\\\pmb{x}_{i}^{s},\\\\pmb{y}_{i}^{s})\\\\}_{i=1}^{N^{s}}$ }$\\\\mathbf{\\\\mathcal{D}}^{T}\\\\,=\\\\,\\\\{\\\\mathbf{x}_{i}^{t}\\\\}_{i=1}^{N^{t}}$ data. The goal is to leverage dict the label sequences of test data from the target }a set of target-domain unlabeled $\\\\mathcal{D}^{S}$ and $\\\\mathcal{D}^{T}$ to predomain.\\n\\n# 3.2 Overview\\nAs illustrated in Figure 2 , our Cross-Domain Data Augmentation framework contains three key stages, including 1) Domain-Adaptive Pseudo Labeling, 2) Domain-Adaptive Language Modeling, and 3) Target-Domain Data Generation. In the first stage, an aspect-aware domain adaptation model is trained to assign pseudo labels to unlabeled data in the target domain. In the second stage, the labeled source data and the pseudo-labeled target data are used to train a domain-adaptive language model, which integrates data generation and sequence labeling in a unified architecture to capture the transferable context and annotation across domains. 
After training the DALM, the last stage uses probabilitybased generation strategy to generate diverse targetdomain data with fine-grained annotations in an autoregressive manner.\\n\\n# 3.3 Domain-Adaptive Pseudo Labeling\\nIn this stage, our goal is to assign the pseudo labels to each unlabeled data in the target domain. Since the data distribution of the source domain is different from that of the target domain, directly training a classifier on the labeled source data to predict the pseudo labels of the unlabeled target data will bring much noise. Thus, it is necessary to alleviate the domain discrepancy to improve the quality of pseudo-labels. Since aspect terms are shown to play a crucial role in ABSA ( Gong et al. ,2020 ), we attempt to explicitly minimize the distance between source-domain and target-domain aspect term representations via Maximum Mean Discrepancy (MMD) ( Gretton et al. ,2012 ).  \\n\\nand the unlabeled ta the aspect terms in extract the aspect terms in based algorithm named Double Propagation ( Specifically, given the labe D$\\\\mathcal{D}^{S}$ data via D$\\\\mathcal{D}^{T}$ D$\\\\mathcal{D}^{T}$ gold labels and based on a rule, we first obtain source data Qiu $\\\\mathcal{D}^{S}$ et al. ,2011 ). Let us use $\\\\pmb{x}^{d}=\\\\{\\\\bar{w_{1}^{d}},\\\\bar{w_{2}^{d}},...,w_{n}^{d}\\\\}$ }to denote a source or target domain sentence and use $\\\\pmb{a}^{d}\\\\,=\\\\,\\\\{w_{i}^{d},...,w_{j}^{d}\\\\}$ to denote e aspect terms in the sentence, where then employ a pre-trained BERT model to obtain $d\\\\in\\\\{s,t\\\\}$ ∈{ }. We the hidden representation of the sentence $\\\\mathbf{H}^{d}=$ $\\\\{\\\\mathbf{h}_{1}^{d},\\\\mathbf{h}_{2}^{d},...,\\\\mathbf{h}_{n}^{\\\\bar{d}}\\\\}$ }and the asp epresentation $\\\\mathbf{a}^{d}=g(\\\\mathbf{h}_{i}^{d},...,\\\\mathbf{h}_{j}^{d})$ , where $\\\\mathbf{h}^{d}\\\\,\\\\in\\\\,\\\\mathbb{R}^{r}$ ∈,$r$ refers to the hidden dimension, and $g(\\\\cdot)$ denotes the meanpooling operation. 
Next, we propose an aspectlevel MMD loss to alleviate the distribution discrepancy across domains as follows:  \\n\\n  \\nFigure 2: Overview of cross-domain Data Augmentation with Domain-Adaptive Language Modeling $(\\\\mathrm{DA}^{2}\\\\mathrm{LM})$ .  \\n\\n$$\\n\\\\begin{array}{r l}&{\\\\mathcal{L}_{\\\\mathrm{mmd}}=\\\\mathrm{d}_{k}^{2}\\\\big(\\\\mathcal{D}_{a}^{S},\\\\mathcal{D}_{a}^{T}\\\\big)=\\\\cfrac{1}{\\\\big(N_{a}^{s}\\\\big)^{2}}\\\\sum_{i,j}^{N_{a}^{s}}k\\\\big(\\\\mathbf{a}_{i}^{s},\\\\mathbf{a}_{j}^{s}\\\\big)+}\\\\\\\\ &{\\\\cfrac{1}{\\\\big(N_{a}^{t}\\\\big)^{2}}\\\\sum_{i,j}^{N_{a}^{t}}k\\\\big(\\\\mathbf{a}_{i}^{t},\\\\mathbf{a}_{j}^{t}\\\\big)-\\\\cfrac{2}{N_{a}^{s}N_{a}^{t}}\\\\sum_{i}^{N_{a}^{s}}\\\\sum_{j}^{N_{a}^{t}}k\\\\big(\\\\mathbf{a}_{i}^{s},\\\\mathbf{a}_{j}^{t}\\\\big),}\\\\end{array}\\n$$  \\n\\nwhere aspect term representations in the source domain $\\\\mathcal{D}_{a}^{S}$ and $\\\\mathcal{D}_{a}^{T}$ respectively denote the sets of and the target domain, $N_{a}^{s}$ and $N_{a}^{t}$ refer to the number of aspect terms in the two domains, and $k(\\\\cdot)$ denotes the Gaussian Kernel function.  \\n\\nMeanwhile, for each source sample, the hidden representation $\\\\mathbf{H}^{s}$ is fed into a Conditional Random Field (CRF) layer to predict the label sequence for the ABSA or AE task $p(\\\\pmb{y}^{s}|\\\\mathbf{H}^{s})$ . The goal is to minimize the negative log-probability of the correct label sequence of each source-domain sample:  \\n\\n$$\\n\\\\mathcal{L}_{\\\\mathrm{crf}}=-\\\\sum_{i=1}^{N^{s}}\\\\log p(\\\\boldsymbol{y}_{i}^{s}|\\\\mathbf{H}_{i}^{s}).\\n$$  \\n\\nThe CRF loss for the ABSA or AE task and the aspect-level MMD loss are combined to train the base model $C_{b}$ :  \\n\\n$$\\n\\\\begin{array}{r}{\\\\mathcal{L}=\\\\mathcal{L}_{\\\\mathrm{crf}}+\\\\alpha\\\\mathcal{L}_{\\\\mathrm{mmd}},}\\\\end{array}\\n$$  \\n\\nwhere $\\\\alpha$ is the hyper-parameter.  
\\n\\nFinally, we use $C_{b}$ to assign pseudo labels $\\\\{(\\\\pmb{x}_{i}^{p t},\\\\pmb{y}_{i}^{p t})\\\\}_{i=1}^{N^{t}}$ to each sample in .$\\\\mathcal{D}^{T}$ , and obtain $\\\\begin{array}{r l}{\\\\mathcal{D}^{P T}}&{{}=}\\\\end{array}$'},\n",
       " {'paper_id': '623004385aee126c0f9b5038',\n",
       "  'paper_title': 'Towards Unifying the Label Space for Aspect- and Sentence-based Sentiment Analysis',\n",
       "  'abstract': 'Abstract\\n\\nThe aspect-based sentiment analysis (ABSA) is a fine-grained task that aims to determine the sentiment polarity towards targeted aspect terms occurring in the sentence. The development of the ABSA task is very much hindered by the lack of annotated data. To tackle this, the prior works have studied the possibility of utilizing the sentiment analysis (SA) datasets to assist in training the ABSA model, primarily via pretraining or multi-task learning. In this article, we follow this line, and for the first time, we manage to apply the Pseudo-Label (PL) method to merge the two homogeneous tasks. While it seems straightforward to use generated pseudo labels to handle this case of label granularity unification for two highly related tasks, we identify its major challenge in this paper and propose a novel framework, dubbed as Dual-granularity Pseudo Labeling (DPL). Further, similar to PL, we regard the DPL as a general framework capable of combining other prior methods in the literature ( Rietzler et al. ,2019 ;Bai et al. ,2020 ). Through extensive experiments, DPL has achieved state-of-the-art performance on standard benchmarks surpassing the prior work significantly ( Liu et al. ,2021 ).\\n\\n# 1 Introduction\\n\\n# 1.1 Aspect-based Sentiment Analysis\\nThe aspect-based sentiment analysis (ABSA) task aims to recognize the sentiment polarities centered on the considered aspect terms occurring in the sentence. The establishment of the ABSA task echoes the long-standing literature of conventional sentence-level sentiment analysis (SA). For instance, as shown in Figure 1 , a normal ABSA data annotation tags sentiment score on specificaspect terms in the sentence, like “surroundings” as positive and “food” as negative. Meanwhile, in the conventional sentence-based sentiment analysis, the whole sentence is labeled as negative at a coarser granularity.  
\\n\\n  \\nFigure 1: Sentiment Analysis (SA) and Aspect-based Sentiment Analysis (ABSA). The sample on the above is the ABSA task, while the sample on the bottom is the SA task. Both tasks aim at analyzing the sentiments carried by the objects in the box.  \\n\\nDue to its much finer granularity, the annotation cost is significantly higher than its conventional counterpart. Essentially, many of the existing SA datasets ( He et al. ,2018 ) can be crawled and curated straightforwardly from the review websites such as Amazon 1 or ${\\\\mathrm{Yelp}}^{2}$ . The five-star rating system comes in handy to accomplish the annotation. Thus, the SA datasets are often presented at a large scale. By contrast, the ABSA annotation has no such “free lunch”. It has to require human annotators to participate. Coupling with its higher complexity on labeling, the ABSA datasets are ubiquitously at considerably smaller scales ( Pontiki et al. ;He et al. ,2018 ;Yu et al. ,2021b ). To this date, the available datasets for conventional sentiment analysis are generally larger to several orders of magnitude than the ABSA.  \\n\\nFor instance, the commonly used ABSA benchmark SemEval 2014 task 4 has less than 5000 samples ( Pontiki et al. ), while there are 4,000,000 sentences in the Amazon Review dataset 3 for SA. Due to the similarity between the SA task and the ABSA task, it is natural to use SA datasets as auxiliary datasets for the ABSA task ( He et al. ,2018 ). Most, if not all, previous work has focused on pretraining and multi-task learning methods ( He et al. ,2018 ,2019b ). In this paper, we first take the PseudoLabel method to utilize the SA datasets to solve the challenge faced by the ABSA task.  \\n\\n  \\nFigure 2: Dataset Generation in the Pseudo-Label (PL) Method. This figure shows a pipeline of the traditional Pseudo-Label method. $\\\\mathbf{x}$ is the input data, a sentence in the SA dataset, while $y$ is the sentiment carried by a sentence. 
$\\\\mathbf{t}_{i}$ indicates the position of an aspect term in a sentence, and $y_{i}$ is the label for that aspect term. $\\\\mathbf{t}_{i}^{\\\\prime}$ and $y_{i}^{\\\\prime}$ are pseudo labels generated by the ABSA model. As we can see, in the PL method, the sentence sentiment labels are dropped, and the SA dataset is regarded as an unlabeled dataset.\\n\\n# 1.2 Pseudo-Label\\nThe family of Pseudo-Label methods has had wide success in multiple fields ( Pham et al. ,2020 ;Ge et al. ,2020 ;Mallis et al. ,2020 ;Zoph et al. ,2020 ;He et al. ,2019a ). The core of this family is to “trust” the generated fake labels by running the unlabeled samples through a teacher network that is trained by using the limited number of labeled samples. The generated labeled samples are then combined with the original set of supervised datasets and fed to the final model training.  \\n\\nIn this article, our core mission is to incorporate the large-scale datasets into the sentiment analysis with the targeted ABSA task. While there have been works on this line, such as He et al. (2018 )and He et al. (2019b ), exploring the Pseudo-Label methods has been very much untapped. Indeed, a very straightforward technological solution is depicted in Figure 2 . One can apply the traditional Pseudo-Label method to generate a bunch of pseudo-aspect-based sentiment labels from the SA or even the unlabeled datasets. However, a consequence of this is the total abandonment and waste of the provided coarse-grained labels. While seemingly acceptable, we argue that due to the homogeneous root for the ABSA and SA tasks, the under-exploiting of the sentence-level coarsegrained sentiment labels is sub-optimal. It will be unnecessary if the traditional framework throws away the coarser-grained labels containing finergrained task-relevant information. We argue that the Pseudo-Label family of approaches is limited to fit a uniform granularity situation. 
They ought to evolve and further adapt to the discrepancy of granularity in the label space.',\n",
       "  'introduction': 'Introduction\\n, some more important details of our experiments need to be clarified for ease of understanding.\\n\\n# Evaluate Matrix\\nThe model for ABSA is tested on SemEval’s test set. Like those who have performed this work before, we use the model classification accuracy (ACC) and macro-F1 (F1) scores as the evaluation criterion.\\n\\n# Batch Loader\\nSince the size of the current auxiliary dataset is much larger than the existing dataset. To avoid the large auxiliary dataset changing the original dataset distribution, we adopt two asynchronous loaders and define the step ratio $k$ , i.e., whenever the model is trained on the original dataset by 1 step, it is trained on the auxiliary dataset by $k$ steps. In general, we set $k=1$ .\\n\\n# Model Implementation\\nThe encoder has three main structures for the ABSA task: BERT ( Devlin et al. ,2018 ), Relational Graph Attention Networks (RGAT) ( Wang et al. ,2020 ), and masking embedding module. The BERT and RGAT have been proved to have a good effect on this task. The mask embedding module is used to generate $\\\\mathbf{z}$ and h. It is similar to the implementation of “segment_id” in the code of BERT.\\n\\n# 5.2 Main Results\\nTable 2 shows that the DPL has achieved a state-ofthe-art (SOTA) performance in terms of the average accuracy and F1-scores on the SemEval 2014 task 4 subtask 2 dataset. The group denoted as “Auxiliary Dataset is multi-task learning methods based on labeled datasets. Compared with them, our work shows the advantage of the PL method. “BERT-based” are some recently published works with good results. Obviously, our method achieves significant improvements over them.  \\n\\nIt should be noted that our design is based on the BERT. Thus the comparison is not made with the methods based on a more powerful pre-trained model, such as Roberta ( Liu et al. ,2019 ), DeBERTa ( Silva and Marcacini ), and GPT-3 ( Floridi and Chiriatti ,2020 ).  
\\n\\n<html><body><table><tr><td rowspan=\"2\">Model</td><td colspan=\"2\">Restaurant</td><td colspan=\"2\">Laptop</td></tr><tr><td>Acc</td><td>F1</td><td>Acc</td><td>F1</td></tr><tr><td>RGAT(Baietal.,2020)</td><td>86.04</td><td>80.27</td><td>79.53</td><td>74.54</td></tr><tr><td>RGAT+DPL</td><td>87.22</td><td>81.47</td><td>81.01</td><td>77.52</td></tr><tr><td>Improvement</td><td>+1.18</td><td>+1.20</td><td>+1.48</td><td>+2.98</td></tr><tr><td>Adapter(Rietzleretal.,2019)</td><td>87.89</td><td>81.05</td><td>80.23</td><td>75.77</td></tr><tr><td>Adapter+DPL</td><td>89.54</td><td>84.86</td><td>81.96</td><td>78.58</td></tr><tr><td>Improvement</td><td>+1.65</td><td>+3.71</td><td>+1.73</td><td>+2.81</td></tr><tr><td>MultiBERT</td><td>84.54</td><td>78.52</td><td>78.32</td><td>73.87</td></tr><tr><td>MultiBERT+DPL</td><td>85.52</td><td>79.61</td><td>79.75</td><td>75.80</td></tr><tr><td>Improvement</td><td>+0.98</td><td>+1.09</td><td>+1.43</td><td>+1.93</td></tr></table></body></html>  \\n\\nTable 3: Results of Combining DPL with Other Methods. Restaurant and Laptop are two benchmarks same as those in Table 2 . RGAT ( Bai et al. ,2020 ), Adapter ( Rietzler et al. ,2019 ) are typical ABSA methods. MultiBERT is a multi-task baseline implemented by us. It predicts the SA label based on the “[cls]” and predicts the ABSA task based on the specific word vector. We add the DPL framework to them, denoted as “$+\\\\mathrm{DPL}^{\\\\circ}$ , and achieve significant improvements.\\n\\n# 5.3 DPL as a General Framework\\nAs we mentioned, we promote DPL as a general framework capable of combining other methods on the ABSA task. Table 3 shows the performances of some typical methods before and after they combine the DPL framework. On the one hand, RGAT (Bai et al. ,2020 ) is a model architecture based on GAT and BERT. Thus the improvement shows that the DPL framework fits other architectural designs, even without auxiliary datasets. 
On the other hand, for those methods involving auxiliary datasets, we take Adapter ( Rietzler et al. ,2019 ) and MultiBERT for demonstration. Previous works are mainly divided into two categories, pretraining and multitask learning. Adapter ( Rietzler et al. ,2019 ) can be categorized into the pretraining class while MultiBERT is a multi-task learning baseline inspired by He et al. (2018 ). Since the previous works using the multi-task method to combine the SA and the ABSA datasets were LSTM based, we implemented a better model based on the BERT. All the improvements verify that the DPL framework does not conflict with these methods and exhibits full compatibility for further performance gains.\\n\\n# 5.4 Ablation Study\\nWe set up several sets of ablation experiments and present the results in Table 4 to explore the role of adversarial training and pseudo labels in the DPL framework.  \\n\\nThe above experiments contain two types of BERT on the SemEval Restaurant dataset. To ensure the fairness of the ablation experiments, we Table 4: Results of ablation study. “Restaurant” takes plain BERT as the initial model while “Restaurant+Pre” takes Rietzler et al. (2019 )’s BERT as the initial model. “DPL” denotes our method. “Traditional Pseudo-Label” represents we take the PL method for fine-grained tasks dropped out the coarse-grained labels. The last three cases named in the form of “- X” means that we deleted the “X” from the original DPL to evaluate the effect of “X”.  
\\n\\n<html><body><table><tr><td rowspan=\"2\">Model</td><td colspan=\"2\">Restaurant+Pre</td><td colspan=\"2\">Restaurant</td></tr><tr><td>Acc</td><td>F1</td><td>Acc</td><td>F1</td></tr><tr><td>DPL</td><td>89.54</td><td>84.86</td><td>86.68</td><td>80.44</td></tr><tr><td>TraditionalPseudo-Label</td><td>-1.43</td><td>-2.09</td><td>-1.60</td><td>-2.73</td></tr><tr><td>-adversarialtraining</td><td>-1.96</td><td>-3.31</td><td>-1.96</td><td>-3.60</td></tr><tr><td>-coarse-grainedpseudolabels</td><td>-1.60</td><td>-2.74</td><td>-1.34</td><td>-1.35</td></tr><tr><td>-fine-grainedpseudolabels</td><td>-1.96</td><td>-2.84</td><td>-0.79</td><td>-1.79</td></tr></table></body></html>  \\n\\nuse the same parameters when training the same group, and the parameter configurations are shown in Appendix.  \\n\\nThe comparison with “Traditional Pseudo-Label” shows the advantages of our method. From the item “- adversarial training”, the significant decline on F1 reflects that adversarial training plays an important role in the DPL framework. The items, “- coarsegrained pseudo labels” and “- fine-grained pseudo labels”, show that only adding adversarial training at one granularity has less effect than adding it both ways.  \\n\\nFurthermore, we also take Chamfer Distance (CD) between the set of $\\\\mathbf{h}$ and the set of $\\\\mathbf{z}$ to provide an insight into the effect of the mutual exclusiveness. And the CD of the model with the adversarial training process is $30\\\\%$ larger than that of the model without this process. That means the adversarial training process increases the distance between the variable $\\\\mathbf{h}$ and $\\\\mathbf{z}$ .',\n",
       "  'related_works': 'Related Works\\ns\\n\\n# 2.1 Aspect-based Sentiment Analysis (ABSA)\\nABSA is a finer-grained task of Sentiment Analysis (SA). It is a pipeline task, including aspect term extraction and aspect term sentiment classification. Aspect term sentiment classification is the true target task in this paper. For convenience, we use ABSA to refer to this task in the remaining parts.  \\n\\nLike other application tracks in NLP, the family of neural network models has wide successes in this task ( Jiang et al. ,2011 ;Vo and Zhang ,2015 ;Zhang et al. ,2016 ;Ma et al. ,2017 ;Li et al. ,2018 ;Wang et al. ,2018 ;Huang et al. ,2018 ;Song et al. ,2019 ). Wang et al. (2016 ) introduce attention mechanism into an LSTM to model the inter-dependence between sentence and aspect term. Tang et al. (2016 )apply Memory Networks in this task.  \\n\\nSyntax-based models have also been explored widely in this domain ( Dong et al. ,2014 ;Tai et al. ,2015 ;Nguyen and Shirai ,2015 ;Liu et al. ,2020 ;Li et al. ,2021 ;Pang et al. ,2021 ). Sun et al. (2019 )and Zhang et al. (2019 ) introduced graph convolution networks (GCN) to leverage the structured information from the dependency tree. Huang and Carley (2019 ) used graph attention networks (GAT) to improve the performance. Bai et al. (2020 ) and Wang et al. (2020 ) took the syntax relations as edge features and introduced them into the Relational Graph Attention Network (RGAT).  \\n\\nIn addition, pretrained language models like BERT ( Devlin et al. ,2018 ) have greatly promoted the development of ABSA ( Li et al. ,2018 ;Gao et al. ,2019 ;Song et al. ,2019 ;Rietzler et al. ,2019 ;Yang et al. ,2019 ).\\n\\n# 2.2 Using Extra Dataset for ABSA\\nDue to the dataset scale challenge of the ABSA task, there have been some methods exploring how to utilize the auxiliary dataset.  \\n\\nSome of them ( Xu et al. ,2019 ;Rietzler et al. ,2019 ;Yu et al. 
,2021b ) achieve decent ABSA performance by post-processing or fine-tuning BERT (Devlin et al. ,2018 ) with an additional unlabeled dataset. For these methods, we argue that the cost of computation is too high. Moreover, DPL does not conflict with it and can accommodate the results of these works. We take Rietzler et al. (2019 )’s work as an example for comparison in experiments.  \\n\\nThe others ( He et al. ,2018 ,2019b ;Chen and Qian ,2019 ;Liang et al. ,2020 ;Yang et al. ,2019 ;Oh et al. ,2021 ;Yu et al. ,2021a ;Yan et al. ,2021 )utilize some labeled datasets and propose (later extend) a framework applying multitask learning methods. These auxiliary labeled datasets mainly include the sentiment analysis (SA) task and other subtasks of ABSA, such as Aspect Term Extraction, Opinion Term Extraction, and so on ( Yan et al. ,2021 ). DPL is more similar to these methods, using labeled datasets. However, we argue that the datasets of other subtasks can’t solve the problem of the high annotation cost. Thus, DPL utilizes the SA task as auxiliary datasets and is the first to apply the PL method to this problem.'},\n",
       " {'paper_id': '652d3668939a5f408248d8fe',\n",
       "  'paper_title': 'CERM: Context-aware Literature-based Discovery Via Sentiment Analysis',\n",
       "  'abstract': 'Abstract\\ns and full paper text of Pubmed publications, we extract sentences with one or more mentions of predefined entities. These predefined entities which are obtained from multiple sources 23456 can be placed in 6 groups: Genes, Disease, Chemical compounds, Nutrition, and Food ingredients. We then generate entity pairs from entities that appear in the same sentence. Thereby obtaining a dataset that focuses on the relationship between two entities, $e_{1}$ and $e_{2}$ , in a given sentence, s. Due to the high cost of data labeling, we only select 50 ,000 entity pairs with their corresponding sentences for labeling. We randomly selected entity pairs with corresponding sentences for labeling to achieve a representative distribution of entity pairs in the dataset. We then invite external data curators to assign the sentiment of selected entity pairs given their corresponding sentences. This labeling is done using the Amazon Mechanical Turk System 7 . Each entity pair and respective sentences are labeled by 3 different people as either positive, negative, or neutral. When there are disagreements, we use the majority label, and if there is no majority consensus, the data point is removed. After cleaning and post-processing the labeled data, we obtained 11 ,366 labeled entity pairs. The final labeled dataset has 2 ,890 positive, 3 ,191 negative, and 3 ,011 neutral entity pairs given their corresponding sentences. See Table 1 for data statistics.  \\n\\nGiven the labeled data, we randomly divided it into a 70/30 traintest split, where $70\\\\%$ of the data was used for training, and the remaining $30\\\\%$ was reserved for testing. Additionally, we augmented the training set with all available unlabeled data. Table 1 presents descriptive statistics for the dataset. Based on the statistics, it can be seen that the data has a relatively balanced label distribution. 
Also, it can be observed from the data that each data point contains a unique entity that does not repeat in other data points. This condition can lead to several issues (e.g., limited context and inconsistent quality) in the training process of the static word embeddings model if the learning process is only conducted on labeled data. Given that for the labeled data, each entity is only present in a single sentence;  \\n\\nTable 1. The statistics of ERSA dataset   \\n\\n\\n<html><body><table><tr><td>Property names</td><td>TrainingSet</td><td>Testing Set</td></tr><tr><td>Labelleddatacount -Positive -Negative -Neutral</td><td>9092 2890 3191 3011</td><td>2274 696 785 793</td></tr><tr><td>unlabeleddatacount Wordcount</td><td>50000</td><td>-</td></tr><tr><td>-Average -Min -Max</td><td>35.59 3 1008</td><td>35.20 3 603</td></tr><tr><td>#of unique entities -Chemicals -Consumables -Diseases -Nutrients -Gene</td><td>18184 7823 2450 7467 115 329</td><td>4548 1971 657 1820 28 72</td></tr></table></body></html>  \\n\\nthere may be insufficient contextual information available to extract relevant features (a limited context issue). Additionally, the lack of diversity in the quality and relevance of the sentences for each entity may significantly impact the model’s performance (inconsistent quality). Hence the need for a semi-supervised approach in learning.',\n",
       "  'introduction': 'Introduction\\n\\nWe are what we eat - Ludwig Feuerbach . This quote means that the food we eat can have either positive or negative effects on us. Hence, we should aim to consume food that brings us both health and happiness [8]. In recent years, the interest in healthy lifestyles and healthy diets has surged, especially after the pandemic [10, 22]. Ideally, advice regarding an appropriate diet for an individual should be consulted with a nutritionist, considering their expert knowledge for planning a personalized and sustainable diet according to their needs. However, consulting with a nutritionist can often be timeconsuming and require additional funds, making it less accessible. Previous studies have attempted to develop diet recommendation systems by training various artificial intelligence (AI) models on the massive food-related data available [4, 5, 12, 13, 20, 21]. While some studies focus on dietary recommendation systems targeting specific audiences (e.g., dietary planning for people with certain health conditions [5, 21] or age groups [4, 20]), few studies target a broader audience [12, 13].  \\n\\nIn [12], researchers developed a dietary recommendation system considering various factors such as demographic information, health conditions, and nutritional needs. The system accurately predicted meal suitability by representing it as a binary classification problem. However, due to its reliance on a neural network for prediction, the system lacked the ability to provide explanations for its decisions. Another study by Kim & Chung [13] combined symbolic AI and neural network approaches to produce a hybrid system that achieves the same goal but can explain its decisions. Due to the additional inference process by the symbolic AI approach, the resulting recommendations can be inherently explained. 
Although [13] has successfully addressed the explainability issue in the recommendation system, there is still room for improvement to produce a better solution. Previous studies [12, 13] have primarily focused on macronutrients and their impact on human health, neglecting the significance of micronutrients. However, the recommendation process should also consider essential factors like food-disease relationships, nutritional components, and ingredients, as outlined in [17]. Neglecting these components may lead to unintended consequences, such as recommending allergenic meals to users or suggesting the consumption of micronutrients that are unsuitable for a user’s medical condition.  \\n\\nA dietary recommendation system that can consider all the information in a meal concerning the user’s medical condition can be realized based on two other supporting systems. The first system should be responsible for breaking down all the nutritional information in a meal. Then, based on such information, the second could infer the relationship between each element and the user’s medical condition. Research on the first system has been extensively conducted, but research on the second is limited. In addition, research in this field often prioritizes the development of databases [7, 18, 23, 25] that summarize existing relationships rather than adaptive systems that can learn and incorporate new information. Arguably, such a system is much needed given that research related to food science is still developing; thus, knowledge about the effects of compounds on diseases can change based on the latest studies.  \\n\\nWith abundant food science-related research articles, learning relationships between food ingredients and biomedical concepts can be structured as a literature-based sentiment analysis task. Specifically, this task involves learning the relationship between two entities, $e_{1}$ and $e_{2}$ , given a short text (sentence) $s$ where the entities are mentioned. 
This problem is different from a regular sentiment analysis task since the sentiment of $s$ may not necessarily represent the sentiment of $e_{1}$ and $e_{2}$ . For instance, consider a sentence “The daily consumption of Ginger is beneficial to the body but ineffective for Diarrhea.” . Although the overall sentiment of the sentence is positive, the relationship between “Ginger” and “Diarrhea” has a contrasting sentiment. This example demonstrates that the overall sentiment of a sentence may not always indicate the sentiment of the relationship between entities mentioned within it. Furthermore, some sentences involve multiple entity pairs, further complicating the task. Hence, compared to regular sentiment analysis, this task presents a greater challenge. To address this issue, we propose modeling this problem as Entity Relationship Sentiment Analysis (ERSA).  \\n\\nDefinition 1 Entity Relationship Sentiment Analysis : Given a sentence s and two entities $e_{1}$ and $e_{2}$ , where $e_{1}\\\\neq e_{2}$ , the goal is to determine the sentiment polarity of the relationship between $e_{1}$ and $e_{2}$ given s, where the relationship can be classified as either positive, negative, or neutral.  \\n\\nIn this study, we also propose CERM - a Context-aware Entity Relationship Prediction Model to address the ERSA problem more effectively. Inspired by promising results in previous studies [1, 2, 3, 9], we combine the abilities of static and contextualized word embeddings models to generate richer representations of the problem inputs. While BERT, as a contextualized word embeddings model, is used to represent the features of the input sentence ( s), a static word embeddings model is used to represent the features of each entity ( $\\\\boldsymbol{e}_{1}$ and $e_{2}$ ). 
The combination of these two models is believed to produce richer representations because static word embeddings are better at capturing general semantic relationships between words (e.g., antonyms and synonyms), while contextualized embeddings are better at capturing more subtle semantic relationships (e.g., negation and sarcasm) [1, 2, 3, 9]. Furthermore, the two models also produce different types of information, and their combination can result in a more comprehensive representation. Furthermore, considering the significant resources required for data labeling, we propose a semisupervised learning (SSL) strategy with the proposed model to leverage unlabeled data during the learning process.  \\n\\nThe contributions of this paper include:  \\n\\n•We introduce a new problem called Entity Relationship Sentiment Analysis (ERSA), which enables concept pair relationship inference from a wealth of food science-related research articles through sentiment analysis.   \\n•We propose Context-aware Entity Relationship Prediction Model (CERM) for the ERSA task. Our experiments demonstrate that CERM, our proposed model, outperforms the state-of-the-art semi-supervised text classification methods with application to the ERSA task. We have also showcased the effectiveness of CERM by evaluating its performance on the Aspect-based Sentiment Analysis (ABSA) task.   \\n•We introduce our dataset to facilitate further research in this literature-based sentiment analysis domain.',\n",
       "  'related_works': 'Related Works\\n\\n# 2.1 Fine-grained Sentiment Analysis Tasks\\nFine-grained sentiment analysis is a task in natural language processing that goes beyond the basic classification of sentiment and seeks to identify more nuanced sentiment levels in text. Entity-sentiment analysis (ESA) [19], aspect-based sentiment analysis (ABSA) [24], and multi-entity sentiment analysis (ME-ABSA) [32] are some examples of fine-grained sentiment analysis tasks. ESA involves identifying the sentiment associated with specific entities mentioned in a text, such as people, organizations, or products. ESA is more complex than regular sentiment analysis due to the presence of multiple entities in a text, and the sentiment towards one entity may contradict the sentiment towards another. For instance, based on a person’s tweet, the sentiment towards a specific player may be positive, but the sentiment towards the management may be negative [19].  \\n\\nOn the other hand, the ABSA task focuses on assigning multiple sentiments to a given text based on a list of aspects. For example, in [24], researchers define an ABSA task to detect multiple aspects (price, safety, transit, and general) in a comment toward a tourist destination and predict sentiment values for each aspect. As an ABSA task requires a more fine-grained understanding of the language used in the text, it is also considered a more challenging task than a regular sentiment analysis task. In [32], researchers extend the ABSA task to define the ME-ABSA task, where the focus is on identifying the sentiment with specific aspects of multiple entities mentioned in a text. As the ME-ABSA task deals with multiple entities and aspects, it is considered more challenging than ESA and ABSA tasks.  \\n\\nSimilar to the previously mentioned tasks, the ERSA task can be classified as a fine-grained sentiment analysis task. 
While ERSA focuses on predicting the sentiment of the relationship between two named entities, whether explicitly or implicitly expressed in a sentence, ME-ABSA deals with the sentiment of multiple entities and a predefined set of aspects. The critical distinction is that ERSA does not have a predefined set of relationships between entities, making it challenging to map or encode as a ME-ABSA task. In ERSA, the primary objective is to predict the sentiment of the entity relationship itself rather than the sentiment of an entity with respect to a specific aspect, which is the primary goal of ME-ABSA. Therefore, ERSA cannot be directly mapped or encoded as an ME-ABSA task.\\n\\n# 2.2 Semi-supervised Learning on Text Classification\\nSemi-supervised learning is motivated by the time-consuming and expensive process needed for data labeling, especially for data that requires domain-specific knowledge. Furthermore, a much larger pool of unlabeled data is often available than labeled data. There have been several applications of SSL in text classification. In [14], researchers proposed a mechanism for training a machine learning model that utilizes both labeled and unlabeled data. The method involves initially training the model on the labeled data and then using it to generate pseudo-labels for the unlabeled data. The key insight behind this approach is that the model’s predictions on the unlabeled data will likely be accurate for at least some examples.  \\n\\nIn [31], researchers defined an SSL algorithm by incorporating data augmentation techniques on the unlabeled data. By constraining the model’s predictions for the original and augmented versions of the unlabeled data, the model can learn to be consistent. 
For example, in a traditional sentiment analysis task, if the model predicts a sentiment value of a sentence like “Scientific research shows that ginger could help ease a sore throat” to be ‘positive,’ the model can further enhance its learning by applying a consistency loss that encourages consistent predictions for its augmented versions. The proposed method was also shown to perform well against other SSL algorithms for various text classification tasks. The researchers in [27] further extended the idea by combining consistency regularization and pseudo-labeling techniques. Similarly to [31], FixMatch uses consistency learning to encourage the model to make similar predictions for slightly perturbed versions of the same input. Instead of using all predictions on the unlabeled data, the FixMatch method only considers pseudo-labels with high confidence during the model training.  \\n\\nIn our proposed method, we incorporate consistency regularization on the unlabeled data, following the promising results of the previous two studies. Instead of using BackTranslation (BT) to generate noisy examples, we utilize Easy Data Augmentation (EDA) in our proposed method as it better preserves the original text’s meaning and requires lower computational resources. Moreover, considering the input from the problem domain, we also employ cosine embedding loss to generate a better model to learn the relevance of keywords and their corresponding text.\\n\\n# 3 ERSA Dataset\\nThe ERSA dataset is a dataset for entity relationship sentiment analysis. This dataset is extracted from the publication text of papers in the PubMed dataset 1 .  \\n\\nFrom the abstracts and full paper text of Pubmed publications, we extract sentences with one or more mentions of predefined entities. These predefined entities which are obtained from multiple sources 23456 can be placed in 6 groups: Genes, Disease, Chemical compounds, Nutrition, and Food ingredients. 
We then generate entity pairs from entities that appear in the same sentence. Thereby obtaining a dataset that focuses on the relationship between two entities, $e_{1}$ and $e_{2}$ , in a given sentence, s. Due to the high cost of data labeling, we only select 50 ,000 entity pairs with their corresponding sentences for labeling. We randomly selected entity pairs with corresponding sentences for labeling to achieve a representative distribution of entity pairs in the dataset. We then invite external data curators to assign the sentiment of selected entity pairs given their corresponding sentences. This labeling is done using the Amazon Mechanical Turk System 7 . Each entity pair and respective sentences are labeled by 3 different people as either positive, negative, or neutral. When there are disagreements, we use the majority label, and if there is no majority consensus, the data point is removed. After cleaning and post-processing the labeled data, we obtained 11 ,366 labeled entity pairs. The final labeled dataset has 2 ,890 positive, 3 ,191 negative, and 3 ,011 neutral entity pairs given their corresponding sentences. See Table 1 for data statistics.  \\n\\nGiven the labeled data, we randomly divided it into a 70/30 traintest split, where $70\\\\%$ of the data was used for training, and the remaining $30\\\\%$ was reserved for testing. Additionally, we augmented the training set with all available unlabeled data. Table 1 presents descriptive statistics for the dataset. Based on the statistics, it can be seen that the data has a relatively balanced label distribution. Also, it can be observed from the data that each data point contains a unique entity that does not repeat in other data points. This condition can lead to several issues (e.g., limited context and inconsistent quality) in the training process of the static word embeddings model if the learning process is only conducted on labeled data. 
Given that for the labeled data, each entity is only present in a single sentence;  \\n\\nTable 1. The statistics of ERSA dataset   \\n\\n\\n<html><body><table><tr><td>Property names</td><td>TrainingSet</td><td>Testing Set</td></tr><tr><td>Labelleddatacount -Positive -Negative -Neutral</td><td>9092 2890 3191 3011</td><td>2274 696 785 793</td></tr><tr><td>unlabeleddatacount Wordcount</td><td>50000</td><td>-</td></tr><tr><td>-Average -Min -Max</td><td>35.59 3 1008</td><td>35.20 3 603</td></tr><tr><td>#of unique entities -Chemicals -Consumables -Diseases -Nutrients -Gene</td><td>18184 7823 2450 7467 115 329</td><td>4548 1971 657 1820 28 72</td></tr></table></body></html>  \\n\\nthere may be insufficient contextual information available to extract relevant features (a limited context issue). Additionally, the lack of diversity in the quality and relevance of the sentences for each entity may significantly impact the model’s performance (inconsistent quality). Hence the need for a semi-supervised approach in learning.'},\n",
       " {'paper_id': '6602812813fb2c6cf6680045',\n",
       "  'paper_title': 'Counterfactual-Enhanced Information Bottleneck for Aspect-Based Sentiment Analysis',\n",
       "  'abstract': 'Abstract\\n\\nDespite having achieved notable success for aspect-based sentiment analysis (ABSA), deep neural networks are susceptible to spurious correlations between input features and output labels, leading to poor robustness. In this paper, we propose a novel CounterfactualEnhanced I nformation Bottleneck framework (called CEIB) to reduce spurious correlations for ABSA. CEIB extends the information bottleneck (IB) principle to a factual-counterfactual balancing setting by integrating augmented counterfactual data, with the goal of learning a robust ABSA model. Concretely, we first devise a multi-pattern prompting method, which utilizes the large language model (LLM) to generate high-quality counterfactual samples from the original samples. Then, we employ the information bottleneck principle and separate the mutual information into factual and counterfactual parts. In this way, we can learn effective and robust representations for the ABSA task by balancing the predictive information of these two parts. Extensive experiments on five benchmark ABSA datasets show that our CEIB approach achieves superior prediction performance and robustness over the state-of-the-art baselines. Code and data to reproduce the results in this paper is available at: https://github.com/shesshan/CEIB.',\n",
       "  'introduction': 'Introduction\\n\\nAspect-based sentiment analysis (ABSA), which aims to identify the sentiment of a specific aspect in a sentence, has raised increasing interest in both academic and industrial communities (Zhang et al. 2022). For accurate and stable sentiment prediction in this fine-grained sentiment analysis task, it is essential to capture the context words expressing opinions towards the target aspect.  \\n\\nSo far, deep learning techniques have been predominant in the ABSA task. Deep neural networks can automatically and efficiently learn discriminative contextual representations of both the context and aspect without time-consuming human annotation (Negi and Buitelaar 2014). To model the semantic relationship between the target aspect and its context, various attention mechanisms have been proposed to learn interactive features of the context and aspect (Tang, Qin, and Liu 2016; Ma et al. 2017; Lei et al. 2019). In another line, several works explicitly capture syntax-aware features for the aspect by incorporating the syntactic knowledge with graph neural networks (Huang and Carley 2019; Wang et al. 2020; Liang et al. 2022). More recently, pre-trained language models (PLMs), such as BERT (Devlin et al. 2019), have been applied to the ABSA task, yielding state-of-theart results (Song et al. 2019; Xu et al. 2019; Jiang et al. 2019; Zhang, Zhou, and Wang 2022). Extensive linguistic knowledge learned from the large-scale textual corpus can be utilized to improve the performance for ABSA.  \\n\\n  \\nFigure 1: Spurious correlation between the context words “never had ” and the sentiment label “P OSITIVE ” in the restaurant dataset. We use counterfactual data featuring identical spurious context words while different sentiment labels to encourage the model to capture vital opinion words.  \\n\\nDespite the effectiveness of prior studies, few efforts are devoted to mitigating the spurious correlation problem for ABSA. 
Specifically, deep ABSA models appear to associate superficial patterns with predicted labels, which are held by most training samples but not intrinsic to the ABSA task. For example, as shown in Figure 1, due to the high cooccurrence of the context words “ never had ” and the sentiment label “P OSITIVE ” in the training corpus, deep models tend to learn the strong correlation between the context words “never had” and the label “P OSITIVE ”, rather than capturing the semantically crucial opinion expressions. As a result, models would fail to infer the ground-truth label “N EUTRAL ” for the testing instance, which contains the words “ never had ” without holding this spurious correlation. Under such an inductive bias, models that have achieved promising performance on the in-domain data would suffer from poor robustness against the out-of-distribution or more challenging data. One possible solution to tackle this challenge is to introduce counterfactual data with the similar spurious context words while opposite sentiment labels to motivate the counterfactual thinking (Wang et al. 2022) ability of the ABSA model. In this way, the model can pay more attention to semantically relevant opinion words for the target aspect. In addition, incorporating the original data with the augmented counterfactual data without considering their interactions would even exacerbate the model performance. Thus, it poses a non-trivial challenge to devise a strategy to effectively exploit the interactions between factual and counterfactual data for improving the robustness of the deep ABSA model.  \\n\\nIn light of this, we propose a CounterfactualEnhanced I nformation Bottleneck framework (called CEIB) to reduce spurious correlations for ABSA, aiming to improve the robustness of the deep ABSA model. 
The proposed CEIB framework learns a more robust model by taking benefits of both the large language model (LLM) to generate counterfactual data from the original training data and the information bottleneck (IB) principle to model interactions between the original data and augmented data. Specifically, we first devise a multi-pattern prompting method, utilizing LLM to generate high-quality counterfactual samples from the original training samples. Then, we employ the IB principle to discard spurious features from the input while retaining essential predictive information for the sentiment label. To enhance the capacity of CEIB in characterizing adversarial and out-of-distribution data, we separate the mutual information in the original IB objective into factual and counterfactual parts by leveraging the original factual sample and the generated counterfactual sample. By balancing the predictive information of these two parts, we can learn more robust and balanced representations for the ABSA task. The main contributions in this paper can be summarized as follows:  \\n\\n• We propose a novel CEIB framework for robust aspectbased sentiment analysis, which reduces spurious correlations by taking advantage of both the IB principle and counterfactual data augmentation, with the aim of learning a more robust ABSA model.   \\n• We devise a multi-pattern prompting-based method, utilizing LLM to generate high-quality counterfactual data, which are then leveraged to balance the predictive information of the original training data to learn effective and robust representations.   \\n• We conduct extensive experiments on five widely utilized benchmark ABSA datasets. Experimental results show that CEIB achieves better prediction and robustness performance compared to the strong competitors.',\n",
       "  'related_works': 'Related Works\\n\\n\\n# Aspect-Based Sentiment Analysis\\nAs an essential task in natural language processing, sentiment analysis is commonly studied at document-level or sentence-level, which makes distinguishing sentiment polarities of different aspects in a single document or sentence difficult. To address this limitation, aspect-based sentiment analysis (ABSA), a fine-grained sentiment analysis task, is proposed to identify the sentiment polarity towards a specific aspect within a sentence or document.  \\n\\nSo far, deep neural networks have dominated the literature on ABSA. Earlier approaches centered around devising diverse attention mechanisms to learn attention-based representations of the context and the target aspect, which implicitly captured the semantic relationship between the given aspect and its context (Wang et al. 2016; Ma et al. 2017; Lei et al. 2019). For example, Wang et al. (2016) first proposed attention-based LSTMs to capture relevant sentiment information from the context given the target aspect. Ma et al. (2017) introduced an interactive attention to interactively learn the attention-aware representations of the target aspect and its context.  \\n\\nIn another trend, several studies focus on explicitly capturing syntax-aware features for the target aspect by leveraging syntactic knowledge and graph neural networks (Huang and Carley 2019; Wang et al. 2020; Tian, Chen, and Song 2021; Liang et al. 2022). The key idea of these methods involves exploiting the syntactic structures, such as syntax dependency trees, to build graphs. Then, graph convolutional networks (GCNs) (Tian, Chen, and Song 2021; Liang et al. 2022) or graph attention networks (GATs) (Huang and Carley 2019; Wang et al. 2020) can be utilized to aggregate sentiment information from the syntactically adjacent nodes to the target aspect node.  \\n\\nMore recently, the pre-trained language models (PLMs), such as BERT (Devlin et al. 
2019) and RoBERTa (Liu et al. 2019), have been applied to ABSA and yielded state-of-theart results (Song et al. 2019; Jiang et al. 2019; Wang et al. 2020; Zhang, Zhou, and Wang 2022). These methods either employed BERT/RoBERTa as an embedding layer to acquire better initial embeddings (Wang et al. 2020; Jiang et al. 2019) or fine-tuned BERT/RoBERTa-based models by incorporating a task-specific classification layer (Xu et al. 2019). They absorbed the merit of rich linguistic and world knowledge contained in PLMs.\\n\\n# Spurious Correlation Reduction in NLP\\nDespite the preliminary success, deep neural networks are notoriously prone to learning spurious correlations between superficial feature patterns and the predicted label. For instance, models can achieve promising results in natural language inference (NLI) without capturing the semantic correlations between hypothesis and premises, due to the reliance on specific linguistic patterns in hypothesis (Gururangan et al. 2018) or superficial heuristics between the input text pairs (McCoy, Pavlick, and Linzen 2019). Similar biases have also been revealed in other tasks, including question answering (Jia and Liang 2017) and reading comprehension (Kaushik and Lipton 2018). These models are “right for the wrong reasons”, which results in poor robustness when the data distribution shifts.  \\n\\nExisting solutions to mitigate the spurious correlation problem can be roughly grouped into two categories: (1)  \\n\\n  \\nFigure 2: The overview of our CEIB, encompassing two primary modules: (a) counterfactual data augmentation module that employed LLM to generate the counterfactual data and (b) information bottleneck module with a factual-counterfactual balance setting to learn a more robust ABSA model.  \\n\\ndata augmentation (Zellers et al. 2018; Nie et al. 2020; Wang and Culotta 2021; Wu et al. 2022), and (2) ensemble learning (Clark, Yatskar, and Zettlemoyer 2020; Stacey et al. 2020; Sanh et al. 2021; Tian et al. 2022). 
 \\n\\nThe key idea of the data augmentation-based methods is to generate adversarial samples without the superficial patterns or spurious associations to alleviate the dataset bias, thus training more robust models. For instance, Zellers et al. (2018) proposed an adversarial filtering method to generate counterfactual samples and filter them in an adversarial way, which reduced spurious stylistic artifacts in the original dataset. Nie et al. (2020) augmented the original training dataset with human-written samples which exposed the model’s brittleness on spurious correlations in an iterative human-in-the-loop manner. Wang and Culotta (2021) introduced a dataset de-biasing paradigm from the causaltheoretic perspective, which generated causally counterfactual data to train debiased models.  \\n\\nEnsemble learning-based methods proposed to leverage bias-only models to capture superficial features or shallow patterns presented in the training data, and then train a debiased model with the detected spurious correlations. For instance, Stacey et al. (2020) designed a classifier to learn the biases and discouraged the hypothesis encoder from learning them, which in turn updated the biased classifier in an adversarial learning way. Clark, Yatskar, and Zettlemoyer (2020) leveraged a low-capacity model as the bias-only model to capture simple patterns and down-weighted the corresponding loss to train a more robust model via ensemble learning. Tian et al. (2022) detected the spurious correlations in the training dataset based on the causal inference theories and incorporated a new counterfactual model with the factual model to mitigate the bias.  \\n\\nIn this paper, we reduce spurious correlations for robust ABSA by taking benefits of both the data augmentationbased and ensemble learning-based approaches. 
We first generate counterfactual data where the spurious correlations do not hold in order to encourage the trained model to capture semantically relevant opinion words for the target aspect. Then, we employ the IB principle to balance the predictive information of the original factual data and the augmented counterfactual data to learn a more robust ABSA model in an ensemble manner.'},\n",
       " {'paper_id': '65824f86939a5f4082a849b7',\n",
       "  'paper_title': 'Aspect-Based Sentiment Analysis with Explicit Sentiment Augmentations',\n",
       "  'abstract': 'Abstract\\n\\nAspect-based sentiment analysis (ABSA), a fine-grained sentiment classification task, has received much attention recently. Many works investigate sentiment information through opinion words, such as “good” and “bad”. However, implicit sentiment data widely exists in the ABSA dataset, whose sentiment polarity is hard to determine due to the lack of distinct opinion words. To deal with implicit sentiment, this paper proposes an ABSA method that integrates explicit sentiment augmentations (ABSA-ESA) to add more sentiment clues. We propose an ABSA-specific explicit sentiment generation method to create such augmentations. Specifically, we post-train T5 by rule-based data and employ three strategies to constrain the sentiment polarity and aspect term of the generated augmentations. We employ Syntax Distance Weighting and Unlikelihood Contrastive Regularization in the training procedure to guide the model to generate the explicit opinion words with the same polarity as the input sentence. Meanwhile, we utilize the Constrained Beam Search to ensure the augmentations are aspect-related. We test ABSA-ESA on two ABSA benchmarks. The results show that ABSA-ESA outperforms the SOTA baselines on implicit and explicit sentiment accuracy.\\n\\n# Introduction\\nA spectbased Sentiment A nalysis ( ABSA ) aims to induce predictive models over manually annotated sentences to identify the sentiment polarity towards each specific aspect term (Wang et al. 2022a; Li et al. 2022). Taking the second sentence in Fig. 1 (a) as an example, the task aims to automatically identify the sentiment polarities of its aspect terms “outside ” ( Negative ) and “ atmosphere ” ( Positive )potentially with the corresponding opinion words “crushed” and “nice”. Due to its popularity, ABSA has been widely applied in many real-world scenarios, and accordingly, it is one of the most significant tasks in the natural language processing community (Yang et al. 
2023; Ouyang et al. 2023).  \\n\\nTo handle the task of ABSA, many studies have been investigated during the past decade. Broadly speaking, the focus of recent work is on how to generate more discriminative  \\n\\n(a) 1. The fried rice (Positive) is amazing here . 2. It is crushed at outside (Negative), but the minute you walk inside, it has a nice atmosphere (Positive).   \\n(b) $\\\\vec{1}$ . Our server checked on us maybe twice during the entire meal (Negative). 2. All the money went into the interior decoration (Positive), none of it went to the chefs (Negative).  \\n\\nrepresentations for aspect terms to enhance the identification performance of sentiment polarity. Some early studies generate strong aspect term representations by directly employing deep neural encoders, such as LSTM (Tang et al. 2016; Wang et al. 2016; Cheng et al. 2017) and pre-trained language models (Xu et al. 2020; Dai et al. 2021). Beyond them, to further link the aspect terms and opinion words, some studies build dependency trees of sentences and then generate aspect term representations by employing graph convolution networks (GCN) (Sun et al. 2019; Wang et al. 2020; Chen, Teng, and Zhang 2020; Li et al. 2021a).  \\n\\nThe success of the GCN-based approach underscores the pivotal role that opinion words play in the realm of ABSA. However, recent research has highlighted a complex scenario characterized by a lack of distinct opinion words, termed ”implicit sentiment” (Li et al. 2021b; Wang et al. 2022b). To delve into this phenomenon, we select four examples from the Rest.14 to compare the implicit and explicit sentiment sentences. In the context of Fig.1(a), the sentiment is discernible due to distinct opinion words. In contrast, as shown in Fig.1(b), unraveling the sentiment associated with aspect terms such as ”meal,” ”interior decoration,” and ”chefs” is challenging. Implicit sentiment is a prevalent occurrence within ABSA datasets and it is hard to deal with (Li et al. 2021b).  
\\n\\nTo tackle the challenge mentioned above, in the paper, we design a novel ABSA method by integrating Explicit Sentiment A ugmentations ( ABSA-ESA ). Such augmentations provide more sentiment clues for predicting sentiment polarity. We add them after the corresponding input sentence, forming new ABSA training data. To obtain the augmentations, we design an ABSA-specific explicit sentiment generation method. We aim to generate the sentences explicitly conveying the same sentiment polarity as their corresponding input sentences, targeting the same (or similar) aspect terms . We post-train the generation model T5 (Raffel et al. 2020) by the rule-based data selected in the ABSA dataset, making the generated augmentations comply with the above requirements. Furthermore, we introduce three strategies to confine the generated augmentations about their sentiment polarity and aspect terms. Specifically, in the training procedure, we employ the Syntax Distance Weighting and Unlikelihood Contrastive Regularization to lead the model to generate explicit opinion words with the same polarity as the input sentence. Subsequently, when engendering the augmentations, we employ the Constrained Beam Search to ensure the augmentations are aspect-related.  \\n\\nTo sum up, our contributions can be listed as follows:  \\n\\n• We propose a novel ABSA framework named ABSAESA, which focuses on solving the implicit sentiment issue by generating explicit sentiment augmentations.   \\n• We propose an ABSA-specific explicit sentiment generation method that generates augmentations with distinct opinion words for specific aspect terms.   \\n• Empirical results on two ABSA benchmarks show that ABSA-ESA outperforms other methods on both explicit and implicit accuracy.',\n",
       "  'introduction': 'Introduction\\n\\nA spectbased Sentiment A nalysis ( ABSA ) aims to induce predictive models over manually annotated sentences to identify the sentiment polarity towards each specific aspect term (Wang et al. 2022a; Li et al. 2022). Taking the second sentence in Fig. 1 (a) as an example, the task aims to automatically identify the sentiment polarities of its aspect terms “outside ” ( Negative ) and “ atmosphere ” ( Positive )potentially with the corresponding opinion words “crushed” and “nice”. Due to its popularity, ABSA has been widely applied in many real-world scenarios, and accordingly, it is one of the most significant tasks in the natural language processing community (Yang et al. 2023; Ouyang et al. 2023).  \\n\\nTo handle the task of ABSA, many studies have been investigated during the past decade. Broadly speaking, the focus of recent work is on how to generate more discriminative  \\n\\n(a) 1. The fried rice (Positive) is amazing here . 2. It is crushed at outside (Negative), but the minute you walk inside, it has a nice atmosphere (Positive).   \\n(b) $\\\\vec{1}$ . Our server checked on us maybe twice during the entire meal (Negative). 2. All the money went into the interior decoration (Positive), none of it went to the chefs (Negative).  \\n\\nrepresentations for aspect terms to enhance the identification performance of sentiment polarity. Some early studies generate strong aspect term representations by directly employing deep neural encoders, such as LSTM (Tang et al. 2016; Wang et al. 2016; Cheng et al. 2017) and pre-trained language models (Xu et al. 2020; Dai et al. 2021). Beyond them, to further link the aspect terms and opinion words, some studies build dependency trees of sentences and then generate aspect term representations by employing graph convolution networks (GCN) (Sun et al. 2019; Wang et al. 2020; Chen, Teng, and Zhang 2020; Li et al. 2021a).  
\\n\\nThe success of the GCN-based approach underscores the pivotal role that opinion words play in the realm of ABSA. However, recent research has highlighted a complex scenario characterized by a lack of distinct opinion words, termed ”implicit sentiment” (Li et al. 2021b; Wang et al. 2022b). To delve into this phenomenon, we select four examples from the Rest.14 to compare the implicit and explicit sentiment sentences. In the context of Fig.1(a), the sentiment is discernible due to distinct opinion words. In contrast, as shown in Fig.1(b), unraveling the sentiment associated with aspect terms such as ”meal,” ”interior decoration,” and ”chefs” is challenging. Implicit sentiment is a prevalent occurrence within ABSA datasets and it is hard to deal with (Li et al. 2021b).  \\n\\nTo tackle the challenge mentioned above, in the paper, we design a novel ABSA method by integrating Explicit Sentiment A ugmentations ( ABSA-ESA ). Such augmentations provide more sentiment clues for predicting sentiment polarity. We add them after the corresponding input sentence, forming new ABSA training data. To obtain the augmentations, we design an ABSA-specific explicit sentiment generation method. We aim to generate the sentences explicitly conveying the same sentiment polarity as their corresponding input sentences, targeting the same (or similar) aspect terms . We post-train the generation model T5 (Raffel et al. 2020) by the rule-based data selected in the ABSA dataset, making the generated augmentations comply with the above requirements. Furthermore, we introduce three strategies to confine the generated augmentations about their sentiment polarity and aspect terms. Specifically, in the training procedure, we employ the Syntax Distance Weighting and Unlikelihood Contrastive Regularization to lead the model to generate explicit opinion words with the same polarity as the input sentence. 
Subsequently, when engendering the augmentations, we employ the Constrained Beam Search to ensure the augmentations are aspect-related.  \\n\\nTo sum up, our contributions can be listed as follows:  \\n\\n• We propose a novel ABSA framework named ABSAESA, which focuses on solving the implicit sentiment issue by generating explicit sentiment augmentations.   \\n• We propose an ABSA-specific explicit sentiment generation method that generates augmentations with distinct opinion words for specific aspect terms.   \\n• Empirical results on two ABSA benchmarks show that ABSA-ESA outperforms other methods on both explicit and implicit accuracy.',\n",
       "  'related_works': 'Related Works\\n\\n\\n# Aspect-based Sentiment Analysis\\nAspect-Based Sentiment Analysis (ABSA) methods primarily focus on integrating sentiment information from contextual words into aspect terms. In earlier approaches, this was often achieved by utilizing LSTM or Bi-LSTM as encoders (Tang et al. 2016; Wang et al. 2016; Cheng et al. 2017). Consequently, recent advancements have embraced the Attention mechanism as the preferred encoder (Tang et al. 2016; Wang et al. 2016; Cheng et al. 2017). Notably, leveraging pre-trained language models has emerged as the prevailing trend in ABSA (Xu et al. 2020; Dai et al. 2021). Furthermore, to establish stronger connections between aspect terms and opinion words, numerous studies have delved into constructing dependency trees within sentences and refining aspect term representations using Graph Convolutional Networks (GCNs) (Sun et al. 2019; Wang et al. 2020; Li et al. 2021a).  \\n\\nConcurrently, alongside developing robust encoders, researchers have explored the enrichment of training data to provide external sentiment information for the model (He et al. 2019; Wang et al. 2022a; Yang et al. 2023). These additional data often lack fine-grained annotations and necessitate subsequent data processing. Addressing this, this paper integrates ABSA-specific augmentations into ABSA models, bypassing the need for extensive reprocessing.\\n\\n# Implicit Sentiment Analysis\\nImplicit sentiment classification, a pivotal subfield within sentiment analysis, was pioneered by Liu (2012), drawing significant scholarly interest. Initial works revolved around implicit sentiment at the sentence level (Deng, Wiebe, and Choi 2014; Choi, Wiebe, and Mihalcea 2017; Zhou et al. 2021a; Xu et al. 2022). Recent endeavors have shifted towards tackling implicit aspect-based sentiment classification (Li et al. 2021b; Wang et al. 2022b; Fei et al. 2023). 
A prevailing approach involves incorporating external knowledge to capture sentiment expression patterns. For instance, Xu et al. (2022) integrates external sentiment-related knowledge into sentence features, enhancing the model’s sentiment comprehension. Similarly, Li et al. (2021b) employs a post-training strategy with BERT, leveraging contrastive learning on expansive sentiment-annotated corpora. ABSAESA utilizes the data generated by the model instead of obtaining external knowledge.\\n\\n# Data Augmentation\\nWithin NLP, the data augmentation technique has gained substantial traction to expand the pool of available training instances. This approach finds widespread application across diverse domains, including text classification (Wu et al. 2022; Liu et al. 2022; Ouyang et al. 2022), neural machine translation (Lam, Schamoni, and Riezler 2022; Kambhatla, Born, and Sarkar 2022; Gao et al. 2019), and text generation (Bi, Li, and Huang 2021; Xu et al. 2021). Notably, recent strides in ABSA have similarly leveraged data augmentation (Chen, Faulkner, and Badyal 2022; Wang et al. 2022a; Hsu et al. 2021). However, their augmentation techniques tend to be relatively simple, e.g., token replacement, masked aspect prediction, and polarity reversal, limiting the semantic diversity of the enhanced samples. The augmentation method in this paper is based on the language model, which generates augmentations with rich sentiment information.\\n\\n# Our Proposed ABSA-ESA Model\\nIn this section, we introduce the proposed ABSA method named ABSA-ESA .\\n\\n# Overall Framework\\nGenerally speaking, ABSA methods take the review sentence $\\\\mathbf{s}\\\\doteq\\\\{s_{j}\\\\}_{j=1}^{M}$ and its corresponding aspect term $\\\\textbf{a}=$ $\\\\{a_{j}\\\\}_{j=1}^{|{\\\\bf a}|}$ as the model input, $M$ denotes the length of all polarity $y\\\\ \\\\in\\\\ \\\\mathcal{Y}\\\\ =$ $\\\\{\\\\mathsf{P o s i t i v e,N e g a t i v e,N e u t r a l}\\\\}$ {}for a . 
To deal with the sentences containing implicit sentiment, we extend this paradigm by introducing an augmented sentence ${\\\\bf{s}}^{\\\\prime}$ following the initial input s. This augmented sentence contains explicit sentiment tied to the aspect term a . For clarity, we present the comprehensive framework of ABSA-ESA in Figure 2.  \\n\\nTo generate the augmented sentence ${\\\\bf{s}}^{\\\\prime}$ , we propose an ABSA-specific explicit sentiment generation method. We post-train T5 by utilizing $\\\\hat{\\\\mathbf{s}}^{\\\\prime}$ as generation targets selected from the dataset. $\\\\hat{\\\\mathbf{s}}^{\\\\prime}$ has the same (or similar) aspect terms and sentiment as swhile also incorporating explicit sentiment expressions. Additionally, we utilize three strategies to guide the generation concerning sentiment polarity and aspect terms. During the training phase, a Syntax Distance Weighting strategy is implemented to prioritize context words closest to the aspect term in the dependency parse. Furthermore, we also gather $\\\\bar{\\\\mathbf{s}}^{\\\\prime}$ , which has the opposite sentiment of s, for Unlikelihood Contrastive Regularization. It instructs the model about undesirable word choices. When generating ${\\\\bf s}^{\\\\prime}$ , we employ Constrained Beam Search to ensure that the aspect term or its similar aspect is included in the augmentations and its context words are the most relevant to a .  \\n\\n  \\nFigure 2: Overall framework of ABSA-ESA.  \\n\\nNext, we introduce the details of the ABSA-specific explicit sentiment generation method.\\n\\n# Training Data Collection\\nTo train the explicit sentiment generation model, the initial step is to gather the training data. Given an input sentence sand its corresponding aspect term a , the generating target $\\\\hat{\\\\mathbf{s}}^{\\\\prime}$ must satisfy the following rules:  \\n\\n• The target sentence should incorporate the same (or similar) aspect term as the input sentence.   
\\n• The target sentence should exhibit identical sentiment polarity to the input data.   \\n• The target sentence must contain explicit sentiment expressions .  \\n\\nTo obtain the target sentence $\\\\hat{\\\\mathbf{s}}^{\\\\prime}$ that satisfies the given rules, we begin by aggregating all aspect terms in dataset $\\\\mathcal{D}$ aspect term to construct the aspect term set $\\\\bar{\\\\bf a}_{i}$ is associated with a representation $\\\\bar{\\\\mathcal{A}}~=~\\\\{\\\\bar{\\\\mathbf{a}}_{i}\\\\}_{i=1}^{|\\\\mathcal{A}|}$ $r_{\\\\bar{\\\\mathbf{a}}_{i}}{}^{1}$ . Each acquired by consulting the GloVe embedding table (Pennington, Socher, and Manning 2014). Utilizing these represen$\\\\mathcal{R}~=~\\\\{r_{\\\\bar{\\\\mathbf{a}}_{i}}\\\\}_{i=1}^{|A|}$ , we formulate a similarity matrix $\\\\mathbf{C}\\\\,=\\\\,\\\\{c_{i j}\\\\}_{|A|\\\\times|A|}$ tween aspect terms {}|A|×|A| $\\\\bar{\\\\bf a}_{i}$ and here $\\\\bar{\\\\mathbf{a}}_{j}.\\\\,c_{i j}$ $c_{i j}$ represents the similarity beis computed by the cosine distance:  \\n\\n$$\\nc_{i j}=\\\\cos(r_{\\\\bar{\\\\mathbf{a}}_{i}},r_{\\\\bar{\\\\mathbf{a}}_{j}}).\\n$$  \\n\\nWith the similarity matrix $\\\\mathbf{C}$ available, we proceed to the selection of $\\\\hat{\\\\mathbf{s}}^{\\\\prime}$ . According to Li et al. (2021b), the dataset $\\\\mathcal{D}$ $\\\\mathcal{D}_{i}$ Dsions, we choose the can be divided int . As sentences in $\\\\mathcal{D}_{e}$ ˆ$\\\\hat{\\\\mathbf{s}}^{\\\\prime}$ contain explicit sentiment expresplicit subset from this subset to fulfill the third $\\\\mathcal{D}_{e}$ and implicit subset rule above.  \\n\\nWe firs elect $k_{c}$ aspect terms fro $\\\\mathcal{R}$ , which are most similar to a , thereby f ing the set A $\\\\mathcal{A^{\\\\prime}}$ . Subsequently, extract sentences from and share the same sentiment as D$\\\\mathcal{D}_{e}$ containing aspect terms from s. 
This forms the candidate A target sentence s $\\\\hat{S}_{t}$ rom this set, we randomly choose a target sentence $\\\\hat{\\\\mathbf{s}}^{\\\\prime}\\\\in\\\\hat{S}_{t}$ ∈Sto generate training data $(\\\\mathbf{s},\\\\hat{\\\\mathbf{s}}^{\\\\prime},\\\\mathbf{a})$ with the input sentence and the corresponding aspect term. This process is iterated for all input sentences $\\\\mathbf{s}\\\\in\\\\mathcal{D}$ , resultward, we begin to post-train T5 by ing the final training dataset $\\\\hat{\\\\mathcal{D}}\\\\,=\\\\,\\\\{(\\\\mathbf{s}_{i},\\\\hat{\\\\mathbf{s}}_{i}^{\\\\prime},\\\\mathbf{a}_{i})\\\\}_{i=1}^{N}$ D.}. After'},\n",
       " {'paper_id': '6269f8245aee126c0f049bda',\n",
       "  'paper_title': 'A Span-level Bidirectional Network for Aspect Sentiment Triplet Extraction',\n",
       "  'abstract': 'Abstract\\n\\nAspect Sentiment Triplet Extraction (ASTE) is a new fine-grained sentiment analysis task that aims to extract triplets of aspect terms, sentiments, and opinion terms from review sentences. Recently, span-level models achieve gratifying results on ASTE task by taking advantage of whole span predictions. However, all the spans generated by these methods inevitably share at least one token with some others, and these method suffer from the similarity of these spans due to their similar distributions. Moreover, since either the aspect term or opinion term can trigger a sentiment triplet, it is challenging to make use of the information more comprehensively and adequately. To address these concerns, we propose a span-level bidirectional cross-attention framework. Specifically, we design a similar span separation loss to detach the spans with shared tokens and a bidirectional cross-attention structure that consists of aspect and opinion decoders to decode the span-level representations in both aspect-to-opinion and opinionto-aspect directions. With differentiated span representations and bidirectional decoding structure, our model can extract sentiment triplets more precisely and efficiently. Experimental results show that our framework significantly outperforms stateof-the-art methods, achieving better performance in predicting triplets with multi-token entities and extracting triplets in sentences with multi-triplets.\\n\\n# 1 Introduction\\nAspect-based sentiment analysis (ABSA) is an important field in natural language processing (NLP). The ABSA task contains various fundamental subtasks, such as aspect term extraction (ATE), opinion term extraction (OTE), and aspectlevel sentiment classification (ASC). Recent studies focus on solving these tasks individually or doing a combination of two subtasks, such as aspect term polarity co-extraction (APCE), aspect opinion co-extraction (AOCE), and aspectopinion pair extraction (AOPE). 
However, none of these subtasks aims to extract the aspect terms (AT) with their corresponding opinion terms (OT) and sentiment polarity (SP) simultaneously. To tackle this problem, [Peng et al. , 2020] propose the aspect sentiment triplet extraction (ASTE) task which aims to extract ( AT, OT, SP ) triplets such as ( hot dogs, top notch, positive ) and ( coffee, average, negative ) in the example of Figure 1.  \\n\\n  \\nFigure 1: An example of ABSA subtasks. The spans highlighted in blue are aspect terms. The spans in red are opinion terms. Sentiments are marked with green.  \\n\\nTo solve the ASTE task, recent works [Peng et al. , 2020; Wu et al. , 2020; Mao et al. , 2021] use sequential token-level methods and formulate this task as a sequence tagging problem. Although these works achieve competitive results, their token-level model suffer from cascading errors due to sequential decoding. Therefore, [Xu et al. , 2021] propose a spanlevel model to capture the span-to-span interactions among ATs and OTs by enumerating all possible spans as input. However, enumerating all possible spans inevitably causes each span to have other spans with which it shares tokens. For example, the aspect span hot dogs shares tokens with The hot dogs ,The hot ,hot dogs are , and so on. These similar spans may have adjacent distributions in the feature spaces, which lead to the false prediction of downstream tasks.  \\n\\nBesides, although bidirectional predicting sentiment triplets has been proved to be effective in ASTE task, existing bidirectional approaches [Chen et al. , 2021] have a few drawbacks. Although these works propose a bidirectional framework to identify aspect sentiment triplets in both aspect-toopinion and opinion-to-aspect directions, they fail to classify sentiments for triplets in parallel, which ignores the interdependence and indicative association among different triplet candidates. 
For example, the triplets ( coffee, average, negative ) can help infer the sentiment polarity of ( coffee, top notch ) pair during sentiment classification because average and top notch express opposite sentiment tendencies. Apart from that, existing bidirectional approaches are mostly based on token-level models, which suffer from cascading errors due to sequential decoding.  \\n\\nIn this paper, we propose a span-level bidirectional crossattention (SBC) framework for ASTE task. Similar to prior span-level works [Lee et al. , 2017; Zhao et al. , 2020; Zhong and Chen, 2020; Dixit and Al-Onaizan, 2019], our framework enumerates all possible spans as the input. To separate the share-token spans which have adjacent distributions in feature space, we design a similar span separation loss to maximize the KL divergence of the distributions among similar spans. Based on the differentiated span representations, we further design a bidirectional cross-attention structure consist of an aspect decoder and an opinion decoder to identify triplets in both aspect-to-opinion and opinion-to-aspect directions, as shown in Figure 1. In the aspect-to-opinion direction, the aspect decoder aims to extract ATs such as $\\\\{h o t\\\\,d o g s,\\\\,c o f\\\\!f e e\\\\}$ the opinion decoder aim tAnalogously, in the opinion-to-aspect direction, the opinion OTs such as {$\\\\{t o p\\\\,n o t c h\\\\}$ }for each specific AT like {$\\\\{h o t\\\\,d o g s\\\\}$ }.decoder and aspect decoder are aim to extract OTs and their corresponding ATs, respectively. In each direction, the sentiments are classified both in AT and OT extractions, and the sentiment of triplet is determined based on the confidence scores. To verify the effectiveness of our framework, we conduct a series of experiments based on four benchmark datasets. The experimental results show our framework substantially outperforms the existing methods. 
In summary, our contributions are as follows:  \\n\\n• We design a span-level bidirectional cross-attention framework to identify aspect sentiment triplets in both aspect-to-opinion and opinion-to-aspect directions. By this design, our framework can effectively and explicitly build the associations among triplet factors, which is more in line with human cognition.   \\n• We propose the similar span separation loss to separate the representations of spans which share same tokens. Based on these differentiated span representations, the bidirectional cross-attention structure can extract the sentiment triplets more precisely.   \\n• Our proposed framework not only achieves state-of-theart performance in ASTE task, but better results in multitoken span and multi-triplet sentence scenarios.',\n",
       "  'introduction': 'Introduction\\n\\nAspect-based sentiment analysis (ABSA) is an important field in natural language processing (NLP). The ABSA task contains various fundamental subtasks, such as aspect term extraction (ATE), opinion term extraction (OTE), and aspectlevel sentiment classification (ASC). Recent studies focus on solving these tasks individually or doing a combination of two subtasks, such as aspect term polarity co-extraction (APCE), aspect opinion co-extraction (AOCE), and aspectopinion pair extraction (AOPE). However, none of these subtasks aims to extract the aspect terms (AT) with their corresponding opinion terms (OT) and sentiment polarity (SP) simultaneously. To tackle this problem, [Peng et al. , 2020] propose the aspect sentiment triplet extraction (ASTE) task which aims to extract ( AT, OT, SP ) triplets such as ( hot dogs, top notch, positive ) and ( coffee, average, negative ) in the example of Figure 1.  \\n\\n  \\nFigure 1: An example of ABSA subtasks. The spans highlighted in blue are aspect terms. The spans in red are opinion terms. Sentiments are marked with green.  \\n\\nTo solve the ASTE task, recent works [Peng et al. , 2020; Wu et al. , 2020; Mao et al. , 2021] use sequential token-level methods and formulate this task as a sequence tagging problem. Although these works achieve competitive results, their token-level model suffer from cascading errors due to sequential decoding. Therefore, [Xu et al. , 2021] propose a spanlevel model to capture the span-to-span interactions among ATs and OTs by enumerating all possible spans as input. However, enumerating all possible spans inevitably causes each span to have other spans with which it shares tokens. For example, the aspect span hot dogs shares tokens with The hot dogs ,The hot ,hot dogs are , and so on. These similar spans may have adjacent distributions in the feature spaces, which lead to the false prediction of downstream tasks.  
\\n\\nBesides, although bidirectional predicting sentiment triplets has been proved to be effective in ASTE task, existing bidirectional approaches [Chen et al. , 2021] have a few drawbacks. Although these works propose a bidirectional framework to identify aspect sentiment triplets in both aspect-toopinion and opinion-to-aspect directions, they fail to classify sentiments for triplets in parallel, which ignores the interdependence and indicative association among different triplet candidates. For example, the triplets ( coffee, average, negative ) can help infer the sentiment polarity of ( coffee, top notch ) pair during sentiment classification because average and top notch express opposite sentiment tendencies. Apart from that, existing bidirectional approaches are mostly based on token-level models, which suffer from cascading errors due to sequential decoding.  \\n\\nIn this paper, we propose a span-level bidirectional crossattention (SBC) framework for ASTE task. Similar to prior span-level works [Lee et al. , 2017; Zhao et al. , 2020; Zhong and Chen, 2020; Dixit and Al-Onaizan, 2019], our framework enumerates all possible spans as the input. To separate the share-token spans which have adjacent distributions in feature space, we design a similar span separation loss to maximize the KL divergence of the distributions among similar spans. Based on the differentiated span representations, we further design a bidirectional cross-attention structure consist of an aspect decoder and an opinion decoder to identify triplets in both aspect-to-opinion and opinion-to-aspect directions, as shown in Figure 1. 
In the aspect-to-opinion direction, the aspect decoder aims to extract ATs such as $\\\\{h o t\\\\,d o g s,\\\\,c o f\\\\!f e e\\\\}$ the opinion decoder aim tAnalogously, in the opinion-to-aspect direction, the opinion OTs such as {$\\\\{t o p\\\\,n o t c h\\\\}$ }for each specific AT like {$\\\\{h o t\\\\,d o g s\\\\}$ }.decoder and aspect decoder are aim to extract OTs and their corresponding ATs, respectively. In each direction, the sentiments are classified both in AT and OT extractions, and the sentiment of triplet is determined based on the confidence scores. To verify the effectiveness of our framework, we conduct a series of experiments based on four benchmark datasets. The experimental results show our framework substantially outperforms the existing methods. In summary, our contributions are as follows:  \\n\\n• We design a span-level bidirectional cross-attention framework to identify aspect sentiment triplets in both aspect-to-opinion and opinion-to-aspect directions. By this design, our framework can effectively and explicitly build the associations among triplet factors, which is more in line with human cognition.   \\n• We propose the similar span separation loss to separate the representations of spans which share same tokens. Based on these differentiated span representations, the bidirectional cross-attention structure can extract the sentiment triplets more precisely.   \\n• Our proposed framework not only achieves state-of-theart performance in ASTE task, but better results in multitoken span and multi-triplet sentence scenarios.',\n",
       "  'related_works': 'Related Works\\n\\nAspect based sentiment analysis (ABSA) is a fine-grained sentiment analysis task that consists of various subtasks, including aspect term extraction (ATE) [Ma et al. , 2019], opinion term extraction (OTE) [Wu et al. , 2020], aspect-level sentiment classification (ASC) [Li et al. , 2019b]. Since these subtasks are solved individually, recent studies attempted to couple two subtasks as a compound task, such as aspect term polarity co-extraction (APCE) [Li et al. , 2019a], aspect and opinion co-extraction [Yu et al. , 2019], aspect category and sentiment classification [Hu et al. , 2018], and aspect-opinion pair extraction (AOPE) [Gao et al. , 2021]. Although many works have achieved great progress on these tasks, none of these tasks aims to identify the aspect terms as well as their corresponding opinion term and sentiment polarity.  \\n\\nTo tackle this issue, [Peng et al. , 2020] proposed the aspect sentiment triplet extraction (ASTE) task, which aimed to extract aspect terms, the sentiments of the aspect terms, and the opinion terms causing the sentiments. Some methods [Xu et al. , 2020; Wu et al. , 2020] designed a unified tagging scheme to solve this task. Some others [Chen et al. , 2021; ?] formulated this task as a multi-turn machine reading comprehension task and solve it with machine reading comprehension frameworks. Recently, [Xu et al. , 2021] had propose a span-level model to extract ATs and OTs first and then predict the sentiment relation for each (AT, OT)p airs, which suffers from the similar distribution of the representation of the share-token spans and the complexity from exhaustive pairing of every aspect and opinion span candidates.\\n\\n# 3 Methodology\\nAs shown in Figure 2, our SBC framework consists of five parts: task definition, span generation, similar span separation loss, bidirectional cross-attention structure, and inference. 
The details of all parts are given in the following subsections.\\n\\n# 3.1 Task Definition\\nGiven a sentence $S=\\\\{w_{1},w_{2},\\\\ldots,w_{n}\\\\}$ consisting $n$ words, the goal of the ASTE task is to extract a set of aspect sentiment t $\\\\mathcal{T}=\\\\{(a,o,c)_{k}\\\\}_{k=1}^{|\\\\mathcal{T}|}$ from the given sentence $S$ ,where $(a,o,c)$ refers to (aspect term, opinion term, sentiment polarity) and $c\\\\in\\\\{P o s i t i v e,N e u t r a l,N e g a t i v e\\\\}$ .\\n\\n# 3.2 Span Generation\\nGiven a sentence in total. Each span $S$ with $\\\\mathbf{s}_{i}\\\\,=\\\\,\\\\bigl\\\\{w_{s t a r t(i)},\\\\cdot\\\\cdot\\\\cdot,w_{e n d(i)}\\\\bigr\\\\}$ $n$ tokens, there are $m$ possible spans is defined by all the tokens from $s t a r t(i)$ to $e n d(i)$ inclusive, and the maximum length of span $\\\\mathbf{s}_{i}$ is $l_{s}$ :  \\n\\n$$\\n1\\\\leq s t a r t(i)\\\\leq e n d(i)\\\\leq n\\n$$  \\n\\n$$\\ne n d(i)-s t a r t(i)\\\\leq l_{s}\\n$$  \\n\\nTo obtain span representations, we need to get the tokenlevel representations first. In this paper, we utilize BERT [Devlin et al. , 2018] as a sentence encoder to obtain token-level contextua sentence Szed representations . Then, the token-level representations are com$\\\\{\\\\mathbf{h}_{1},\\\\mathbf{h}_{2},\\\\dotsc,\\\\mathbf{h}_{n}\\\\}$ of the given bined by max pooling. Note that various methods can be applied to generate the representations for spans, the effectiveness of these span generation methods will be investigated in the ablation study in Appendix. We define the representation of span $\\\\mathbf{s}_{i}$ as:  \\n\\n$\\\\mathbf{g}_{i}=M a x\\\\left(\\\\mathbf{h}_{s t a r t}(i),\\\\mathbf{h}_{s t a r t+1}(i),\\\\ldots,\\\\mathbf{h}_{e n d}(i)\\\\right)$ where Max represents max pooling.  \\n\\n  \\nFigure 2: The overall architecture of the span-level bidirectional cross-attention (SBC) framework. 
The ‘Select Span Representation’ means that only the original span representations of aspect candidates and opinion candidates are passed to aspect attention module and opinion attention module, respectively. The blue arrows and modules as well as red arrows and modules indicate the extraction of aspect-to-opinion direction and the opinion-to-aspect direction, respectively.\\n\\n# 3.3 Similar Span Separation Loss\\nAfter generating the representation of span, most previous models directly use the span representations for downstream tasks. However, enumerating all possible spans in a sentence inevitably generates lots of spans that have same tokens with some others, and the model may suffer from the limitations in processing these similar spans due to their adjacent distribution. To separate these spans with similar distributions, we propose a similar span separation loss function based on KL divergence for separating spans with shared tokens, as shown in Figure 2. The similar span separation loss is defined as:  \\n\\n$$\\nK L(\\\\mathbf{g}_{i}||G_{i})=\\\\sum_{j}^{G_{i}}s o f t m a x(\\\\mathbf{g}_{i})l o g\\\\frac{s o f t m a x(\\\\mathbf{g}_{i})}{s o f t m a x(\\\\mathbf{g}_{j})}\\n$$  \\n\\n$$\\nK L(G_{i}||\\\\mathbf{g}_{i})=\\\\sum_{j}^{G_{i}}s o f t m a x(\\\\mathbf{g}_{j})l o g\\\\frac{s o f t m a x(\\\\mathbf{g}_{j})}{s o f t m a x(\\\\mathbf{g}_{i})}\\n$$  \\n\\n$$\\n\\\\mathcal{J}_{K L}=\\\\sum_{i}^{m}l o g(1+\\\\frac{2}{K L(G_{i}||\\\\mathbf{g}_{i})+K L(\\\\mathbf{g}_{i}||G_{i})})\\n$$  \\n\\nwhere $G_{i}$ indicates the set of the representation of spans which share at least one token with $\\\\mathbf{s}_{i}$ .\\n\\n# 3.4 Bidirectional Cross-attention Structure\\nAs the aspect sentiment triplet can be triggered by an AT or an OT, we further design a bidirectional cross-attention structure to decode the span representations. As shown in Figure 2, the bidirectional cross-attention structure consists of an aspect decoder and an opinion decoder. 
The details of each component of the bidirectional cross-attention structure are given in the following subsections.'},\n",
       " {'paper_id': '624fa8da5aee126c0f3a5873',\n",
       "  'paper_title': 'BiSyn-GAT+: Bi-Syntax Aware Graph Attention Network for Aspect-based Sentiment Analysis',\n",
       "  'abstract': 'Abstract\\n\\nAspect-based sentiment analysis (ABSA) is a fine-grained sentiment analysis task that aims to align aspects and corresponding sentiments for aspect-specific sentiment polarity inference. It is challenging because a sentence may contain multiple aspects or complicated ( e.g., conditional, coordinating, or adversative) relations. Recently, exploiting dependency syntax information with graph neural networks has been the most popular trend. Despite its success, methods that heavily rely on the dependency tree pose challenges in accurately modeling the alignment of the aspects and their words indicative of sentiment, since the dependency tree may provide noisy signals of unrelated associations ( e.g., the “ conj ” relation between “ great ” and “ dreadful ” in Figure 2 ). In this paper, to alleviate this problem, we propose a Bi -Syn tax aware Graph At tention Network ( BiSyn-GAT+ ). Specifically, BiSyn-GAT+ fully exploits the syntax information ( e.g., phrase segmentation and hierarchical structure) of the constituent tree of a sentence to model the sentiment-aware context of every single aspect (called intra -context) and the sentiment relations across aspects (called inter -context) for learning. Experiments on four benchmark datasets demonstrate that BiSyn-GAT $^+$ outperforms the stateof-the-art methods consistently.\\n\\n# 1 Introduction\\nAspect-based sentiment analysis (ABSA) aims to identify the sentiment polarity towards a given aspect in the sentence. Many previous works ( Yang et al. ,2018 ;Li et al. ,2019 ) mainly focus on extracting sequence features via Recurrent Neural Networks (RNNs) or Convolution Neural Networks (CNNs) with attention mechanisms, which often assume that words closer to the target aspect are more likely to be related to its sentiment. 
However, the assumption might not be valid as exemplified in Figure 1 (a), “service” is obviously closer to “great” rather than “dreadful”, and these methods may assign the irrelevant opinion word “great” to “service” mistakenly.  \\n\\n  \\nFigure 1: Examples of ABSA task. Each underlined aspect is classified to corresponding sentiment polarity.  \\n\\n  \\nFigure 2: Dependency tree of “The food is great but the service and the environment are dreadful”. Two separate ellipses encircle its two clauses. The “conj” edge between “great” and “dreadful” is a noise.  \\n\\nTo mitigate this problem, there already exists several efforts ( Wang et al. ,2020a ;Chen et al. ,2020 )dedicated to research on how to effectively leverage non-sequential information ( e.g., syntactic information like dependency tree) via Graph Neural Networks (GNNs). Generally, a dependency tree ( i.e., Dep.Tree), linking the aspect terms to the syntactically related words, stays valid in the long-distance dependency problem. However, the inherent nature of Dep.Tree structure may introduce noise like the unrelated relations across clauses, such as “conj” relation between “great” and “dreadful\" in Figure 2 ,which discourages capturing the sentiment-aware context of each aspect, i.e., intra -context. Moreover, the Dep.Tree structure only reveals relations between words and, thereby, in most cases, is incapable of modeling complicated ( e.g., conditional, coordinating, or adversative) relations of sentences, therefore failing to capture sentiment relations between aspects, i.e., inter -context.  \\n\\n  \\nFigure 3: Constituent tree of the sentence “The food is great but the service and the environment are dreadful”. Context words are in rectangles and parsed phrase types are in rounded rectangles.  \\n\\nHence, in this paper, we consider fully exploiting the syntax information of the constituent tree to tackle the problem. 
Typically, a constituent tree (i.e., Con.Tree) often contains precise and discriminative phrase segmentation and hierarchical composition structure, which are helpful for correctly aligning the aspects and their corresponding words indicative of sentiment. The former can naturally divide a complicated sentence into multiple clauses, and the latter can discriminate different relations among aspects to infer the sentiment relations of different aspects. We illustrate this with an example in Figure 3 : (1) Clause “The food is great” and the clause “the service and environment are dreadful” are segmented by the phrase segmentation term “but”; (2) In Layer-1, the term “and” indicates the coordinating relation of “service” and “environment”, while the term “but” in Layer-3 reflects the adversative relation towards “food” and “service” (or “environment”).  \\n\\nThus, to better align aspect terms and corresponding sentiments, we propose a new framework, Bi -Syn tax aware Graph At tention Network (BiSyn-GAT+ ), to effectively leverage the syntax information of constituent tree by modeling intra -context and inter -context information. In particular, BiSyn-GAT $^+$ employs: 1) a syntax graph embedding to encode the intra -context of each aspect based on the fusion syntax information within the same clause in a bottom-up way, which combines the phrase-level syntax information of its constituent tree and the clause-level syntax information of its dependency tree. 2) an aspect-context graph consisting of phrase segmentation terms and all aspects to model the inter -context of each aspect. Specifically, it aggregates the sentiment information of other aspects according to the influence between the current aspect and its neighbor aspects, which is calculated based on aspect representations learned from bi-directional relations over the aspect context graph, respectively.  
\\n\\nOur main contributions are as follows:  \\n\\n(1) To the best of our knowledge, this is the first work to exploit syntax information of constituent tree ( e.g., phrase segmentation and hierarchical structure) with GNNs for ABSA. Moreover, it shows superiority in the alignments between aspects and corresponding words indicative of sentiment.  \\n\\n(2) We propose a framework, Bi -Syn tax aware Graph At tention Network ( BiSyn-GAT+ ), to fully leverage syntax information of constituent tree (or, and dependency tree) by modeling the sentimentaware context of each single aspect and the sentiment relations across aspects.  \\n\\n(3) Extensive experiments on four datasets show that our proposed model achieves state-of-the-art performances.',\n",
       "  'introduction': 'Introduction\\n\\nAspect-based sentiment analysis (ABSA) aims to identify the sentiment polarity towards a given aspect in the sentence. Many previous works ( Yang et al. ,2018 ;Li et al. ,2019 ) mainly focus on extracting sequence features via Recurrent Neural Networks (RNNs) or Convolution Neural Networks (CNNs) with attention mechanisms, which often assume that words closer to the target aspect are more likely to be related to its sentiment. However, the assumption might not be valid as exemplified in Figure 1 (a), “service” is obviously closer to “great” rather than “dreadful”, and these methods may assign the irrelevant opinion word “great” to “service” mistakenly.  \\n\\n  \\nFigure 1: Examples of ABSA task. Each underlined aspect is classified to corresponding sentiment polarity.  \\n\\n  \\nFigure 2: Dependency tree of “The food is great but the service and the environment are dreadful”. Two separate ellipses encircle its two clauses. The “conj” edge between “great” and “dreadful” is a noise.  \\n\\nTo mitigate this problem, there already exists several efforts ( Wang et al. ,2020a ;Chen et al. ,2020 )dedicated to research on how to effectively leverage non-sequential information ( e.g., syntactic information like dependency tree) via Graph Neural Networks (GNNs). Generally, a dependency tree ( i.e., Dep.Tree), linking the aspect terms to the syntactically related words, stays valid in the long-distance dependency problem. However, the inherent nature of Dep.Tree structure may introduce noise like the unrelated relations across clauses, such as “conj” relation between “great” and “dreadful\" in Figure 2 ,which discourages capturing the sentiment-aware context of each aspect, i.e., intra -context. 
Moreover, the Dep.Tree structure only reveals relations between words and, thereby, in most cases, is incapable of modeling complicated ( e.g., conditional, coordinating, or adversative) relations of sentences, therefore failing to capture sentiment relations between aspects, i.e., inter -context.  \\n\\n  \\nFigure 3: Constituent tree of the sentence “The food is great but the service and the environment are dreadful”. Context words are in rectangles and parsed phrase types are in rounded rectangles.  \\n\\nHence, in this paper, we consider fully exploiting the syntax information of the constituent tree to tackle the problem. Typically, a constituent tree (i.e., Con.Tree) often contains precise and discriminative phrase segmentation and hierarchical composition structure, which are helpful for correctly aligning the aspects and their corresponding words indicative of sentiment. The former can naturally divide a complicated sentence into multiple clauses, and the latter can discriminate different relations among aspects to infer the sentiment relations of different aspects. We illustrate this with an example in Figure 3 : (1) Clause “The food is great” and the clause “the service and environment are dreadful” are segmented by the phrase segmentation term “but”; (2) In Layer-1, the term “and” indicates the coordinating relation of “service” and “environment”, while the term “but” in Layer-3 reflects the adversative relation towards “food” and “service” (or “environment”).  \\n\\nThus, to better align aspect terms and corresponding sentiments, we propose a new framework, Bi -Syn tax aware Graph At tention Network (BiSyn-GAT+ ), to effectively leverage the syntax information of constituent tree by modeling intra -context and inter -context information. 
In particular, BiSyn-GAT $^+$ employs: 1) a syntax graph embedding to encode the intra -context of each aspect based on the fusion syntax information within the same clause in a bottom-up way, which combines the phrase-level syntax information of its constituent tree and the clause-level syntax information of its dependency tree. 2) an aspect-context graph consisting of phrase segmentation terms and all aspects to model the inter -context of each aspect. Specifically, it aggregates the sentiment information of other aspects according to the influence between the current aspect and its neighbor aspects, which is calculated based on aspect representations learned from bi-directional relations over the aspect context graph, respectively.  \\n\\nOur main contributions are as follows:  \\n\\n(1) To the best of our knowledge, this is the first work to exploit syntax information of constituent tree ( e.g., phrase segmentation and hierarchical structure) with GNNs for ABSA. Moreover, it shows superiority in the alignments between aspects and corresponding words indicative of sentiment.  \\n\\n(2) We propose a framework, Bi -Syn tax aware Graph At tention Network ( BiSyn-GAT+ ), to fully leverage syntax information of constituent tree (or, and dependency tree) by modeling the sentimentaware context of each single aspect and the sentiment relations across aspects.  \\n\\n(3) Extensive experiments on four datasets show that our proposed model achieves state-of-the-art performances.',\n",
       "  'related_works': 'Related Works\\n\\nSentiment analysis is an important task in the field of natural language processing ( Zhang et al. ,2018 ;Yang et al. ,2020 ) and can be applied in downstream tasks, like emotional chatbot ( Wei et al. ,2019 ;Li et al. ,2020a ;Lan et al. ,2020 ;Wei et al. ,2021 ), recommendation system ( Zhao et al. ,2022 ;Wang et al. ,2020b ), QA system ( Wei et al. ,2011 ;Qiu et al. ,2021 ). Here we focus on a fine-grained sentiment analysis task — ABSA. Recently, deep learning methods have been widely adopted for ABSA task. These works can be divided into two main categories: methods without syntax information ( i.e., Syntax-free methods) and methods with syntax information ( i.e., Syntax-based methods).  \\n\\nSyntax-free methods : Neural networks with attention mechanisms ( Wang et al. ,2016 ;Chen et al. ,2017 ;Song et al. ,2019 ) have been widely used. Chen et al. (2017 ) adopts a multiple-attention mechanism to capture sentiment features. Song et al. (2019 ) uses an attentional encoder network (AEN) to excavate rich semantic information from word embeddings.  \\n\\nSyntax-based methods : Recently, utilizing dependency information with GNNs has become an effective way for ABSA. Zhang et al. (2019 )uses graph convolutional networks (GCN) to learn node representations from Dep.Tree. Tang et al. (2020 ) proposes a dependency graph enhanced dual-transformer network (DGEDT) by jointly considering representations from Transformers and corresponding dependency graph. Wang et al. (2020a )constructs aspect-oriented dependency trees and proposes R-GAT, extending the graph attention network to encode graphs with labeled edges. Li et al. (2021 ) proposes a dual graph convolutional networks (DualGCN) model, simultaneously considering syntax structures and semantic correlations. All above works use syntax information of Dep.Tree, which may introduce noise, as we said before. Thus, we exploit syntax information of Con.Tree with GNNs. 
Precisely, we follow the Con.Tree to aggregate information from words within the same phrases in a bottom-up way and capture intra -context information.  \\n\\nMoreover, some works resort to modeling aspectaspect relations. Some ( Hazarika et al. ,2018 ;Majumder et al. ,2018 ) adopt aspect representations to model relations by RNNs or memory networks, without utilizing context information. And some (Fan et al. ,2018 ;Hu et al. ,2019 ) propose alignment loss or orthogonal attention regulation to constrain aspect-level interactions, which fail when aspects have no explicit opinion expressions or multiple aspects share same opinion words. Recently, there are some works utilizing GNNs to model aspect relations. Liang et al. (2020 ) constructs an inter-aspect graph based on relative dependencies between aspects. Zhao et al. (2020 ) constructs a sentiment graph, where each node represents an aspect, and each edge represents the sentiment dependency relation. However, these works fail to explicitly use phrase segmentation information, such as conjunction words. Thus, we propose an aspect-context graph consisting of all aspects and phrase segmentation terms to model inter -context information.  \\n\\nGNNs with constituent tree : To our knowledge, we are the first work to utilize the constituent tree for ABSA task. But in aspect-category sentiment analysis task, which predicts sentiment polarity towards a given predefined category in the text, Li et al. (2020b ) proposes a Sentence ConstituentAware Network (SCAN) that generates representations of the nodes in Con.Tree. Unlike SCAN, we view parsed phrases as different spans of the input text instead of individual nodes. So we don’t introduce any inner nodes of Con.Tree ( e.g., “NP”,“VP” of Figure 3 ) into the representation space, decreasing the computational overhead.\\n\\n# 3 Methodology\\n\\n# 3.1 Overview\\nProblem Statement . 
Let $\\\\mathbf{s}~=~\\\\{w_{i}\\\\}_{n}$ and $\\\\textbf{A}=$ $\\\\{a_{j}\\\\}_{m}$ be a sentence and a predefined aspect set, where $n$ and $m$ are the number of words in sand the number of aspects in A , respectively. For each s,$\\\\mathbf{A_{s}}=\\\\{a_{i}|a_{i}\\\\in\\\\mathbf{A},a_{i}\\\\in\\\\mathbf{s}\\\\}$ denotes the aspects contained in s. We treat each multiple-word aspect as a single word for simplicity, so $a_{i}$ also means the $i$ -th word of s. The goal of ABSA is to predict the sentiment polarit $y_{i}\\\\in\\\\{$ positive, negative, neural $\\\\}$ for each aspect $a_{i}\\\\in\\\\mathbf{A_{s}}$ ∈.  \\n\\nArchitecture . As shown in Figure 4 , our proposed architecture takes the sentence and all aspects that appear in the text as the input, and outputs the sentiment predictions of the aspects. It contains three components: 1) the intra -context module encodes the input resentations of the target aspects, which contains $\\\\{w_{i}\\\\}$ to obtain aspect-specific reptwo encoders: a context encoder that outputs contextual word representations and a syntax encoder that utilizes syntax information of the parsed constituent tree (or, and dependency tree). 2) the inter -context module includes a relation encoder applied to the constructed aspect-context graph to output relation-enhanced representations. The aspect-context graph composes all aspects of the given sentence and phrase segmentation terms obtained from a designed rule-based map function applied to the constituent tree. 3) the sentiment classifier takes output representations of the above two modules to make predictions.'},\n",
       " {'paper_id': '636b1a6590e50fcafdf41891',\n",
       "  'paper_title': 'AX-MABSA: A Framework for Extremely Weakly Supervised Multi-label Aspect Based Sentiment Analysis',\n",
       "  'abstract': 'Abstract\\n\\nAspect Based Sentiment Analysis is a dominant research area with potential applications in social media analytics, business, finance, and health. Prior works in this area are primarily based on supervised methods, with a few techniques using weak supervision limited to predicting a single aspect category per review sentence. In this paper, we present an extremely weakly supervised multi-label Aspect Category Sentiment Analysis framework which does not use any labelled data. We only rely on a single word per class as an initial indicative information. We further propose an automatic word selection technique to choose these seed categories and sentiment words. We explore unsupervised language model posttraining to improve the overall performance, and propose a multi-label generator model to generate multiple aspect category-sentiment pairs per review sentence. Experiments conducted on four benchmark datasets showcase our method to outperform other weakly supervised baselines by a significant margin.',\n",
       "  'introduction': 'Introduction\\n\\nAspect-based sentiment analysis (ABSA) is a wellknown sentiment analysis task which provides more fine-grained information than simple sentiment understanding ( Liu ,2012 ). The main goal of ABSA is to find the aspects and its associated sentiment within a given text. While the works on ABSA have expanded in different directions, it has primarily two sub-tasks, Aspect Term Sentiment Analysis (ATSA) and Aspect Category Sentiment Analysis (ACSA) ( Xue and Li ,2018 ). ATSA consists of different tasks like aspect term extraction (Li et al. ,2018 ;Luo et al. ,2019 ;Li et al. ,2020a ;Shi et al. ,2021 ), aspect term sentiment classification ( He et al. ,2018 ;Chen and Qian ,2019 ;Hou et al. ,2021 ), opinion term extraction ( Dai and Song ,2019 ;He et al. ,2019 ;Chen and Qian ,2020b ), aspect-oriented opinion term extraction ( Fan et al. ,2019 ;Wu et al. ,2020a ), aspect-opinion pair extraction ( Zhao et al. ,2020 ), etc. For example, in the sentence “ The sushi is top-notch, the waiter is attentive, but the atmosphere is dull. \", ATSA would extract the aspect terms ‘ sushi ’, ‘ waiter ’ and ‘ atmosphere ’; opinion terms ‘ top-notch ’, ‘ attentive ’, and ‘dull ’; and their associated sentiments ‘ positive ’, ‘positive ’ and ‘ negative ’. The other sub-task ACSA aims to find the higher order aspect categories and its associated sentiment from a given text. In the above example, ACSA would detect the categories as ‘ food ’ (as ‘pasta’ is a type of ‘food’), ‘ service ’and ‘ ambience ’; and the associated sentiments as ‘positive ’, ‘ positive ’ and ‘ negative ’.  \\n\\nExisting research on ABSA is dominated by supervised methods, where labeled training data is provided ( Chen et al. ,2017 ;Xue and Li ,2018 ;Cai et al. ,2021 ;Liu et al. ,2021 ;Xu et al. ,2021 ;Yan et al. ,2021 ). A few works try to solve the problem in a weakly/semi-supervised manner, where a few labelled samples are provided ( Wang et al. ,2021a ). 
However, there has been a lack of study on ABSA using unsupervised methods , i.e., without using any labelled data. A few works also focused on unsupervised aspect term extraction ( Shi et al. ,2021 ). However, such works do not deal with the sentiment associated with the aspects. An existing work on weakly supervised ACSA ( Huang et al. ,2020 ) only considered a single aspect category per sentence – thus limiting the task to a larger extent.  \\n\\nMotivated by the above, in this work, we present a methodology for extremely weakly supervised ACSA task, where we do not need any labelled training samples. We solve both aspect category detection (ACD) and ACSA tasks (on each review sentence) just by using the surface text of aspect category and sentiment. Given $N$ review sentences, $C$ categories of interest and $P$ polarities of interest, the ACD task generates $C$ clusters, while the AC generates $(c_{i},\\\\,p_{j})$ tuples where $c_{i}\\\\in C$ ,and the representation learning perspective, wherein $p_{j}\\\\in P$ ∈. As in ( Wang et al. ,2021b ), we adopt representing sentences by class names leads to better clustering. We only use the surface text of the class names and unlabelled sentences to get aspect category and sentiment clusters.  \\n\\nHowever, in clustering, each review sentence would get only one label, thus limiting the task by a substantial extent. To tackle this, we propose X-MABSA , a multi-label generator model which makes use of dependency parser ( Qi et al. ,2020 )and a similarity-based attention mechanism to generate multiple categories and associated sentiment polarity labels for each review sentence. In addition, we find that sometimes the representative text of aspect categories (provided as input) is not present (or sparse) in the text corpus. This might lead to skewed representation of the classes in our framework and thus degrade performance. 
Therefore, we present an automatic surface word selection strategy which would represent the class names better. We combine this with our X-MABSA model and denote it as AX-MABSA.  \\n\\nWe also showcase that unsupervised posttraining of language model on domain specific data significantly improves the sentence representation and thus achieves better results for ACSA tasks. For this, we post-train BERT language model ( Devlin et al. ,2019 ) using domain specific unlabelled data. We perform experiments on four different benchmark aspect-based datasets ( Pontiki et al. ,2014 ,2015 ,2016 ;Cheng et al. ,2017 ), and compare with different supervised and weakly supervised baselines.  \\n\\nOur main contributions are as follows:  \\n\\n•an extremely weakly supervised method to solve the ACSA task without relying on any labelled data, and using only the class names as the only provided information; •an automatic surface word selection strategy for choosing a suitable word corresponding to each aspect and sentiment class; •use of BERT language model post-training on domain specific unlabelled data for semantic representation of review sentences; •a multi-label generator model which makes use of a dependency parser and a similaritybased attention mechanism for generating multiple aspect-sentiment labels for each sentence; and  \\n\\n•experimental results comparing our architecture with different existing baselines on four benchmark aspect datasets.',\n",
       "  'related_works': 'Related Works\\n\\nAspect Based Sentiment Analysis (ABSA) has gained significant attention for a long time, and research has been done in primarily two directions – Aspect Term Sentiment Analysis (ATSA) and Aspect Category Sentiment Analysis (ACSA).\\n\\n# 2.1 Aspect Term Sentiment Analysis\\nResearch on ATSA has been in different subcategories like,  \\n\\nAspect Term Extraction In this sub-task, aspect terms associated with a category are extracted from a given text. Prior research on this is based on sequence labelling problem ( Ma et al. ,2019 ;Li et al. ,2020a ). Li and Lam (2017 ) proposed a neural network-based deep multi-task framework with memory network for extracting aspect terms. Xu et al. (2018 ) presented a double embedding method which uses CNN ( LeCun et al. ,1995 )-based sequence tagging, while Li et al. (2018 ) considered summary of opinions expressed in text as well as the history of aspect detection for effective aspect term extraction. Chen and Qian (2020a ) proposed a soft prototype-based approach with aspect word correlations to improve quality. A few unsupervised methods have tried to improve performance by using traditional topic modelling-based models. Luo et al. (2019 ) proposed a neural network based unsupervised model which takes sememes for better lexical semantics. Shi et al. (2021 ) presented a self-supervised method which works on learning aspect embedding on the word embedding space for aspect extraction.  \\n\\nAspect-level Sentiment Classification In this sub-task, sentiment labels are assigned to each aspect term. Wang et al. (2016 ); Liu and Zhang (2017 ); Ma et al. (2017 ) proposed an attentionbased neural network model for aspect-level sentiment classification (ASC). Tay et al. 
(2018 ) modelled relationship between words and aspects using LSTM model ( Hochreiter and Schmidhuber ,1997) to improve ASC performance.He et al.(2018 ) showed that document knowledge transfer improved performance of ASC task. Chen and Qian (2019 ) proposed a transfer capsule network for transferring knowledge from document-level sentiment classification, while Hou et al. (2021 )adopted a dependency tree-based graph neural network to solve the ASC task.  \\n\\nAspect-oriented Opinion Extraction This task extracts opinion terms associated with aspect terms. Fan et al. (2019 ) designed a sequence label model which used LSTM ( Hochreiter and Schmidhuber ,1997 ) for aspect-oriented opinion extraction (AOE). Wu et al. (2020a ) proposed a tagging scheme for AOE task which uses CNN ( LeCun et al. ,1995 ), LSTM ( Hochreiter and Schmidhuber ,1997 ) and BERT ( Devlin et al. ,2019 ) for opinion extraction. Wu et al. (2020b ) proposed a transfer learning method for transferring knowledge from sentiment classification task to AOE task.  \\n\\nRecent works on ATSA have introduced more sub-tasks like aspect-opinion pair extraction, aspect-sentiment-opinion triplet extraction, aspectcategory-opinion-sentiment quadruple extraction, etc. Yan et al. (2021 ) proposed a BART ( Lewis et al. ,2020 ) -based model to solve all ATSA tasks. Cai et al. (2021 ) introduced a new task called, aspect-category-opinion-sentiment quadruple extraction, a BERT ( Devlin et al. ,2019 )-based model to deal with implicit aspects and opinion terms. Xu et al. (2021 ) proposed a new span-level method for the aspect-sentiment-opinion triplet extraction.\\n\\n# 2.2 Aspect Category Sentiment Analysis\\nAspect Category Sentiment Analysis (ACSA) finds aspect categories and their associated sentiments from a text. Research on this has been conducted on both Aspect Category Detection (ACD) and ACSA tasks. Ma et al. 
(2018 ) proposed a word attention-based hierarchical model which takes common-sense knowledge for solving ACSA task. Xue and Li (2018 ) presented a novel CNN ( LeCun et al. ,1995 )-based model for ACSA task. Liang et al. (2019 ) proposed an encoding scheme which was aspect-guided and able to perform aspectreconstruction. Sun et al. (2019 ) constructed an auxiliary text for aspects and reformed the ACSA as a classification task.  \\n\\nWang et al. (2020 ) proposed a novel dependency tree-based model and a relational graph attention network for encoding the sentences. Li et al. (2020b ) designed a multi-instance framework for multi-label ACSA task. Cai et al. (2020 ) reformed the task as sentiment-category with a two-layer hierarchy where the higher layer detected the sentiment while the lower layer detected the aspect category. Liang et al. (2021 ) presented a semisupervised framework having a beta distributionbased model. The model finds semantically related words from the context of a target aspect. Liu et al. (2021 ) solved the ACSA task as a text generative method using BART ( Lewis et al. ,2020 ). Zhang et al. (2021 ) presented aspect sentiment quad prediction task where ACSA was formulated as a paraphrase generation task.  \\n\\nAlmost all existing works on ACSA are based on supervised methods. In contrast, this work proposes a method for ACSA which does not require any labelled data and relies only on seed text for aspect class names.'},\n",
       " {'paper_id': '646c3addd68f896efa5d1680',\n",
       "  'paper_title': 'MvP: Multi-view Prompting Improves Aspect Sentiment Tuple Prediction.',\n",
       "  'abstract': 'Abstract\\n\\nGenerative methods greatly promote aspectbased sentiment analysis via generating a sequence of sentiment elements in a specified format. However, existing studies usually predict sentiment elements in a fixed order, which ignores the effect of the interdependence of the elements in a sentiment tuple and the diversity of language expression on the results. In this work, we propose Multiview Prompting (M VP) that aggregates sentiment elements generated in different orders, leveraging the intuition of human-like problemsolving processes from different views. Specifically, M VP introduces element order prompts to guide the language model to generate multiple sentiment tuples, each with a different element order, and then selects the most reasonable tuples by voting. M VP can naturally model multi-view and multi-task as permutations and combinations of elements, respectively, outperforming previous task-specific designed methods on multiple ABSA tasks with a single model. Extensive experiments show that M VP significantly advances the state-of-the-art performance on 10 datasets of 4 benchmark tasks, and performs quite effectively in low-resource settings. Detailed evaluation verified the effectiveness, flexibility, and cross-task transferability of M VP.',\n",
       "  'introduction': 'Introduction\\n\\nAspect-based sentiment analysis (ABSA) aims to predict tuples of sentiment elements of interest for a given text. There are four sentiment elements that constitute the main line of ABSA research: aspect term $(a)$ , aspect category $(c)$ , opinion term $(o)$ and sentiment polarity ( s) ( Zhang et al. ,2022 ). Given an example sentence, “I love the sushi badly!”, the corresponding elements are “sushi”, “food quality”, “love” and “positive”, respectively. Early studies focus on a single sentiment element like aspect term (Liu et al. ,2015 ;Ma et al. ,2019 ), aspect category (Zhou et al. ,2015 ) or sentiment polarity ( Wang et al. ,2016 ;Chen et al. ,2017 ). Recent works propose compound ABSA tasks involving multiple associated elements, such as aspect sentiment triplet extraction (ASTE) ( Peng et al. ,2020 ), target aspect sentiment detection (TASD) ( Wan et al. ,2020 ), aspect sentiment quad prediction (ASQP) (Zhang et al. ,2021a ) and aspect category opinion sentiment (ACOS) ( Cai et al. ,2020a ). Their target formats are shown in Table 1 .  \\n\\nTable 1: Aspect sentiment tuple prediction tasks with their corresponding outputs. Notably, although both ACOS and ASQP are the most complex quadratic prediction tasks, ACOS focuses on implicit aspects and opinions compared to ASQP. Detailed tasks and dataset statistics are shown in Appendix A .  \\n\\n\\n<html><body><table><tr><td>Task Output</td></tr><tr><td>Aspect Category Opinion Sentiment (ACOS) a,c,o,s AspectSentiment Quad Prediction (ASQP) a, 0.S AspectSentiment Triplet Extraction (ASTE) a, 0,S Target Aspect Sentiment tDetection (TASD) a, C,S</td></tr></table></body></html>  \\n\\nRecently, generative methods have been used to handle various ABSA tasks uniformly and achieved good performance ( Zhang et al. ,2022 ), where the common practice is to generate a sequence of sentiment elements in a specified format to leverage label semantics. 
To be specific, they use class index ( Yan et al. ,2021 ), sentiment element sequence (Zhang et al. ,2021d ), natural language ( Liu et al. ,2021a ;Zhang et al. ,2021b ), structured extraction schema ( Lu et al. ,2022b ) or opinion tree ( Bao et al. ,2022 ) as the target of the generation models.  \\n\\nHowever, previous works usually generate the sequence of sentiment elements in a left-to-right fixed order, which ignores the influence of the interdependence of the elements in a sentiment tuple and the diversity of language expression on the targets. For example, the $\\\\ ^{\\\\star}c\\\\Rightarrow s\\\\Rightarrow a\\\\Rightarrow o^{,\\\\flat}$ order in P ARAPHRASE (Zhang et al. ,2021b ) (Figure 1 ). This single-order generation has the following potential drawbacks: (1) Incompleteness, tuple prediction is not naturally a text generation task, the relationship among elements is not ordered but interdependent; (2) Instability, as shown in a study by Hu et al. (2022 ), the performance of different target template orders differs significantly; (3) Error accumulation, the previous prediction errors will be accumulated and affect later predictions.  \\n\\n  \\nFigure 1: Compared with predicting in a single order, M VP proposes element-order prompt learning to control the prediction order of sentiment element. M VP contains three steps: $\\\\textcircled{1}$ permutes multiple elements to form order prompts and constructs an appropriate subset in terms of conditional generation scores; $\\\\circledcirc$ generates multiple sequences consisting of tuples from different views based on the prompt subset. The element order of each tuple accords with the prompt in the input; $\\\\circled{3}$ aggregates the multiple predictions and obtains the final output.  
\\n\\nTo address the above challenges, we propose Multiview Prompting (M VP) that aggregates sentiment elements predicted in different orders, leveraging the intuition of solving problems from different views in human reasoning and decision (Stanovich and West ,2000 ). Inspired by prompt chaining ( Liu et al. ,2021b ;Wei et al. ,2022b ;Wang et al. ,2022b ,a ), M VP introduces element orderbased prompt learning to control the prediction order of sentiment elements, enabling diverse target expressions. Compared to single-order generation, MVP mitigates the incompleteness and instability of a fixed order by receiving information from multiple views, while alleviating the potential error accumulation of generative methods via permutation of elements (Figure 1 ). Besides, M VP is naturally suited for training a single model to solve multiple ABSA tasks as combinations of elements, adaptively enabling knowledge transfer from related  \\n\\ntuple prediction tasks.  \\n\\nWe conduct extensive experiments on main aspect sentiment tuple prediction tasks, including ASQP, ACOS, ASTE and TASD. Empirical results show the superiority of M VP in supervised, lowresource, and cross-task transfer settings. In supervised settings, the single-task and multi-task MVP outperform the state-of-the-art by $1.34\\\\%$ and $1.69\\\\%$ absolute F1 scores on all tasks, respectively. At low resource settings, M VP has sizable improvement over strong baselines, and cross-task transfer brings a more remarkable improvement.  \\n\\nOur major contributions are as follows:  \\n\\n1) We introduce M VP, an element order-based prompt learning method that improves sentiment tuple prediction by aggregating multi-view results.  \\n\\n2) M VP naturally allows us to train a single model simultaneously on all tasks. To the best of our knowledge, the multi-tasking M VP is the first single model that substantially outperforms task-specific models on various ABSA tasks.  
\\n\\n3) Experiments show that M VP significantly advances the state-of-the-art on 10 datasets of 4 tasks and is quite effective in low-resource settings.',\n",
       "  'related_works': 'Related Works\\ns\\nAspect-base Sentiment Analysis. ABSA has received wide attention in recent years. Early studies focused on extracting or predicting a single sentiment element like aspect term extraction ( Qiu et al. ,2011 ;Liu et al. ,2015 ;Ma et al. ,2019 ), aspect category detection ( Zhou et al. ,2015 ;Bu et al. ,2021 )or sentiment polarity classification for a given aspect ( Wang et al. ,2016 ;Chen et al. ,2017 ;Lei et al. ,2018 ,2019 ). Some works further consider the joint prediction of two associated elements ( Cai et al. ,2020b ), including aspect-opinion pair extraction ( Wang et al. ,2017 ;Chen et al. ,2020 ), aspect term-polarity co-extraction ( Huang and Carley ,2018 ;Luo et al. ,2019 ;Chen and Qian ,2020 ). And recent works propose more challenging ABSA tasks to predict sentiment triplets or quadruplets (Chen et al. ,2022 ), the most influential of which are ASTE ( Peng et al. ,2020 ;Zhai et al. ,2022 ), TASD (Wan et al. ,2020 ), ASQP ( Zhang et al. ,2021a ) and ACOS with an emphasis on the implicit aspects or opinions ( Cai et al. ,2020a ).  \\n\\nGenerative ABSA. Instead of separate or pipeline methods ( Phan and Ogunbona ,2020 ), most recent works attempt to tackle various ABSA problems using a unified framework ( Sun et al. ,2022 ). Generative methods achieve good performance in ABSA by mitigating the potential error propagation in pipeline methods and fully exploiting the rich label semantic information ( Paolini et al. ,2021 ;Zhang et al. ,2022 ;Yu et al. ,2023 ). They use sentiment element sequence ( Zhang et al. ,2021d ), natural language ( Liu et al. ,2021a ;Zhang et al. ,2021b ) and structured extraction schema ( Lu et al. ,2022b ) etc. as the generative targets. Recently proposed LEGO-ABSA ( Gao et al. ,2022 )and UnifiedABSA ( Wang et al. ,2022c ) focus on multi-tasking with task prompts or instruction design. Hu et al. 
(2022 ) firstly investigate element ordering and propose methods to augment targetside data with selected orders for the ASQP task. Despite the promising results, the augmentation may confuse the model with multiple targets for the same input (i.e., one-to-many), thus leading to discrepancies between inference and training. We fill the gap and eliminate such confusion by aligning training and inference with multi-view prompt learning.\\n\\n# 6 Conclusion\\nIn this work, we introduce an element order-based prompt learning method - M VP, which improves aspect-level opinion information prediction by simple yet effective multi-view results aggregation. Leveraging the intuition of solving problems from different views, M VP advances the research of generative modeling for tuple structure prediction. By combining and permuting the sentiment elements, our multi-tasking model substantially outperforms task-specific models on a variety of ABSA tasks. Detailed experiments show that our method signifi- cantly advances the state-of-the-art on benchmark datasets, in both supervised and low-resource settings. We hope our research will shed light on generative tuple prediction.\\n\\n# Limitations\\nDespite the state-of-the-art performances, our proposed methods still have some limitations for future directions. Firstly , multi-view prompting creates overheads of training and inference proportional to the number of views. For efficiency in practice, according to Figure 3 , M VP with a relatively small number of views behaves decently (e.g., 5 or 7). Secondly , we apply a simple yet effective aggregation strategy to combine the results of multiple views. More advanced strategies can be explored. 
Lastly , experiments only verified the consistent improvement on ABSA tasks, while intuitively, the idea of M VP that leverages multiple views can be expanded to any structure prediction tasks, such as information extraction, emotion-cause pair extraction, and stance detection.\\n\\n# Ethics Statement\\nWe conduct all the experiments on existing datasets widely used in previous public scientific papers. We keep fair and honest in our analysis of experimental results, and our work does not harm anyone. We open-sourced our code for further explorations.  \\n\\nAs for the broader impact, this work may foster further research in sentiment analysis using generative methods, contributing to the simplification and automation of user opinion mining in reality. Nevertheless, this work fine-tunes large pre-trained language models to generate sentiment tuples. Due to the large pre-training corpus based on the Internet, the predicted sentiment polarity is subject to unexpected bias with respect to gender, race, and intersectional identities ( Tan and Celis ,2019 ), which needs to be considered more broadly in the field of natural language processing.'}]"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Rich display of the retrieved paper chunks (NOTE: very large output below)\n",
    "paper_id2chunks_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:08:21.053963Z",
     "start_time": "2025-01-14T02:07:22.334669Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<format>\n",
      "Title: Aspect-Based Sentiment Analysis: A Comprehensive Survey\n",
      "Section 1: Introduction to Aspect-Based Sentiment Analysis\n",
      "Description 1: Provide an overview of Aspect-Based Sentiment Analysis (ABSA), its significance, and applications in various domains such as social media analytics, business, finance, and health.\n",
      "\n",
      "Section 2: Challenges and Limitations in ABSA\n",
      "Description 2: Discuss the challenges and limitations faced in ABSA, including the lack of annotated data, noise in dependency trees, and the presence of implicit sentiment data.\n",
      "\n",
      "Section 3: Approaches and Techniques in ABSA\n",
      "Description 3: Explore the different approaches and techniques used in ABSA, such as knowledge graph augmented networks, multitask frameworks, data augmentation, unified token-pair classification, template-order data augmentation, aspect-oriented opinion alignment networks, and cross-domain data augmentation.\n",
      "\n",
      "Section 4: Datasets and Evaluation Metrics in ABSA\n",
      "Description 4: Present an overview of the datasets used in ABSA research, including Laptop14, Restaurant14, Twitter, METS-CoV, and the Writing Prompt dataset. Discuss the evaluation metrics employed to assess the performance of ABSA models, such as accuracy, F1 score, and human preference correlation.\n",
      "\n",
      "Section 5: Future Directions and Open Challenges in ABSA\n",
      "Description 5: Discuss the future directions and open challenges in ABSA research, including the need for more annotated data, improved handling of implicit sentiment, development of robust models, and exploration of transfer learning approaches.\n",
      "</format>\n",
      "<format>\n",
      "Title: Aspect-Based Sentiment Analysis: A Comprehensive Survey\n",
      "Section 1: Introduction to Aspect-Based Sentiment Analysis\n",
      "Description 1: Provide an overview of Aspect-Based Sentiment Analysis (ABSA), its significance, and its applications in various domains.\n",
      "\n",
      "Section 2: State-of-the-Art Methods and Techniques\n",
      "Description 2: Discuss the evolution of ABSA methods, including traditional machine learning approaches, deep learning techniques, and the role of pre-trained language models. Highlight the advancements in attention mechanisms, graph neural networks, and knowledge graph integration.\n",
      "\n",
      "Section 3: Challenges and Limitations in ABSA\n",
      "Description 3: Explore the challenges faced by ABSA models, such as handling implicit sentiment, addressing spurious correlations, and dealing with data scarcity. Discuss the limitations of current approaches and the need for further research.\n",
      "\n",
      "Section 4: Innovative Approaches and Future Directions\n",
      "Description 4: Present recent innovations in ABSA, including data augmentation techniques, multi-task learning frameworks, and the use of counterfactual data. Discuss the potential of these approaches for improving the robustness and generalization of ABSA models.\n",
      "\n",
      "Section 5: Conclusion and Future Research Directions\n",
      "Description 5: Summarize the key findings of the survey and highlight the potential impact of ABSA on various applications. Discuss the future research directions and open challenges in the field of ABSA.\n",
      "</format>\n",
      "<format>\n",
      "Title: A Comprehensive Survey on Aspect-Based Sentiment Analysis: Approaches, Challenges, and Future Directions\n",
      "\n",
      "Section 1: Introduction to Aspect-Based Sentiment Analysis\n",
      "Description 1: Provide an overview of Aspect-Based Sentiment Analysis (ABSA), its significance in natural language processing, and its applications in various domains.\n",
      "\n",
      "Section 2: State-of-the-Art Methods in ABSA\n",
      "Description 2: Discuss the evolution of ABSA methods, from traditional machine learning approaches to deep learning techniques like Recurrent Neural Networks (RNNs), Convolutional Neural Networks (CNNs), and Transformer-based models. Highlight the role of attention mechanisms and external knowledge integration in improving performance.\n",
      "\n",
      "Section 3: Data Augmentation Strategies for ABSA\n",
      "Description 3: Explore the importance of data augmentation in ABSA and discuss various techniques such as template-based methods, knowledge graph integration, and counterfactual data generation. Analyze their impact on model robustness and generalization.\n",
      "\n",
      "Section 4: Challenges and Limitations in ABSA\n",
      "Description 4: Identify the key challenges in ABSA, including the handling of implicit sentiment, domain adaptation, spurious correlation reduction, and the need for large annotated datasets. Discuss the limitations of current approaches and potential areas for improvement.\n",
      "\n",
      "Section 5: Future Directions and Research Opportunities\n",
      "Description 5: Outline the potential future directions for ABSA research, such as the development of more sophisticated models, exploration of cross-modal sentiment analysis, and the integration of ABSA with other NLP tasks. Highlight the research opportunities in addressing the existing challenges and advancing the field.\n",
      "</format>\n"
     ]
    }
   ],
   "source": [
    "# Generate first-level (rough) outlines: one per paper field (abstract,\n",
    "# introduction, related works), each asking the model for `section_num` sections.\n",
    "id2section = {\n",
    "    \"0\": \"abstract\",\n",
    "    \"1\": \"introduction\",\n",
    "    \"2\": \"related_works\",\n",
    "}\n",
    "section_num = 5  # hoisted out of the loop: it never changes between iterations\n",
    "outlines = []\n",
    "for kind in id2section.values():\n",
    "    outline_rag_result = consolidate_rag_result(paper_id2chunks_list, kind=kind)\n",
    "    prompt = __generate_prompt(ROUGH_OUTLINE_PROMPT, paras={'PAPER LIST': outline_rag_result[0], 'TOPIC': topic,\n",
    "                                                            'SECTION NUM': str(section_num)})\n",
    "    outline = zhipu_api(prompt)\n",
    "    print(outline)\n",
    "    outlines.append(outline)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:09:06.050726Z",
     "start_time": "2025-01-14T02:08:53.214055Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<format>\n",
      "Title: Aspect-Based Sentiment Analysis: A Comprehensive Survey\n",
      "\n",
      "Section 1: Introduction to Aspect-Based Sentiment Analysis\n",
      "Description 1: Provide an overview of Aspect-Based Sentiment Analysis (ABSA), its significance in natural language processing, and its applications across various domains such as social media analytics, business, finance, and health.\n",
      "\n",
      "Section 2: State-of-the-Art Methods and Techniques\n",
      "Description 2: Discuss the evolution of ABSA methods, including traditional machine learning approaches, deep learning techniques like Recurrent Neural Networks (RNNs), Convolutional Neural Networks (CNNs), and Transformer-based models. Highlight advancements in attention mechanisms, graph neural networks, knowledge graph integration, and the role of pre-trained language models.\n",
      "\n",
      "Section 3: Data Augmentation Strategies for ABSA\n",
      "Description 3: Explore the importance of data augmentation in ABSA and discuss various techniques such as template-based methods, knowledge graph integration, counterfactual data generation, and unified token-pair classification. Analyze their impact on model robustness and generalization.\n",
      "\n",
      "Section 4: Challenges and Limitations in ABSA\n",
      "Description 4: Identify and discuss the key challenges in ABSA, including handling implicit sentiment, addressing spurious correlations, dealing with data scarcity, domain adaptation, and noise in dependency trees. Highlight the limitations of current approaches and the need for further research.\n",
      "\n",
      "Section 5: Datasets and Evaluation Metrics in ABSA\n",
      "Description 5: Present an overview of the datasets commonly used in ABSA research, such as Laptop14, Restaurant14, Twitter, METS-CoV, and the Writing Prompt dataset. Discuss the evaluation metrics employed to assess the performance of ABSA models, including accuracy, F1 score, and human preference correlation.\n",
      "\n",
      "Section 6: Future Directions and Research Opportunities\n",
      "Description 6: Outline potential future directions for ABSA research, such as the development of more sophisticated models, exploration of cross-modal sentiment analysis, integration of ABSA with other NLP tasks, and the use of transfer learning approaches. Highlight research opportunities in addressing existing challenges and advancing the field.\n",
      "</format>\n"
     ]
    }
   ],
   "source": [
    "# Merge the three rough outlines into a single consolidated outline\n",
    "outline_texts = ''\n",
    "for i, o in enumerate(outlines):  # enumerate instead of zip(range(len(...)), ...)\n",
    "    outline_texts += f'---\\noutline_id: {i}\\n\\noutline_content:\\n\\n{o}\\n'\n",
    "outline_texts += '---\\n'\n",
    "prompt = __generate_prompt(MERGING_OUTLINE_PROMPT, paras={'OUTLINE LIST': outline_texts, 'TOPIC': topic})\n",
    "outline = zhipu_api(prompt)\n",
    "print(outline)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:10:35.646999Z",
     "start_time": "2025-01-14T02:10:35.643227Z"
    }
   },
   "outputs": [],
   "source": [
    "# Parse the merged outline and display each planned section with its description\n",
    "survey_title, survey_sections, survey_section_descriptions = extract_title_sections_descriptions(outline)\n",
    "print(f\"{survey_title}\\n\")\n",
    "for name, desc in zip(survey_sections, survey_section_descriptions):\n",
    "    print(f\"Section Name: {name}\\nDescription: {desc}\\n\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:15:50.896913Z",
     "start_time": "2025-01-14T02:12:42.079065Z"
    }
   },
   "outputs": [],
   "source": [
    "# Generate second-level (subsection) outlines, one per top-level section\n",
    "sub_sections_list = []\n",
    "for section_name, section_description in zip(survey_sections, survey_section_descriptions):\n",
    "    # Retrieve papers relevant to this section to ground the subsection outline\n",
    "    query = f\"topic: {topic}. section name: {section_name}.\"\n",
    "    simple_rag_result = do_rag_simple(query)\n",
    "    subsection_rag_result = consolidate_rag_result(simple_rag_result, kind='simple')\n",
    "    prompt = __generate_prompt(SUBSECTION_OUTLINE_PROMPT,\n",
    "                               paras={'OVERALL OUTLINE': outline, 'SECTION NAME': section_name,\n",
    "                                      'SECTION DESCRIPTION': section_description, 'TOPIC': topic,\n",
    "                                      'PAPER LIST': subsection_rag_result[0]})\n",
    "    sub_outlines = zhipu_api(prompt)\n",
    "    sub_sections_list.append(sub_outlines)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:15:57.047944Z",
     "start_time": "2025-01-14T02:15:57.039566Z"
    }
   },
   "outputs": [],
   "source": [
    "# Merge the top-level outline with the generated subsection outlines\n",
    "merged_outline = process_outlines(outline, sub_sections_list)\n",
    "print(merged_outline)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:16:09.323789Z",
     "start_time": "2025-01-14T02:16:09.320190Z"
    }
   },
   "outputs": [],
   "source": [
    "# Refine the overall outline: build the editing prompt for the model\n",
    "prompt = __generate_prompt(EDIT_FINAL_OUTLINE_PROMPT2, paras={'OVERALL OUTLINE': merged_outline})\n",
    "print(prompt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:17:17.771738Z",
     "start_time": "2025-01-14T02:16:11.179611Z"
    }
   },
   "outputs": [],
   "source": [
    "# Run the editing prompt and strip the <format> wrapper tags from the reply\n",
    "final_outline = zhipu_api(prompt).replace('<format>\\n', '').replace('</format>', '')\n",
    "print(final_outline)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:52:21.134623Z",
     "start_time": "2025-01-14T02:52:21.129478Z"
    }
   },
   "outputs": [],
   "source": [
    "# Keep an outline variant without the per-section description lines\n",
    "final_outline_wo_description = remove_descriptions(final_outline)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:52:38.910640Z",
     "start_time": "2025-01-14T02:52:38.905359Z"
    }
   },
   "outputs": [],
   "source": [
    "def parse_outline(outline):\n",
    "    result = {\n",
    "        \"title\": \"\",\n",
    "        \"sections\": [],\n",
    "        \"section_descriptions\": [],\n",
    "        \"subsections\": [],\n",
    "        \"subsection_descriptions\": []\n",
    "    }\n",
    "\n",
    "    # Split the outline into lines\n",
    "    lines = outline.split('\\n')\n",
    "\n",
    "    for i, line in enumerate(lines):\n",
    "        # Match title, sections, subsections and their descriptions\n",
    "        if line.startswith('# '):\n",
    "            result[\"title\"] = line[2:].strip()\n",
    "        elif line.startswith('## '):\n",
    "            result[\"sections\"].append(line[3:].strip())\n",
    "            # Extract the description in the next line\n",
    "            if i + 1 < len(lines) and lines[i + 1].startswith('Description:'):\n",
    "                result[\"section_descriptions\"].append(lines[i + 1].split('Description:', 1)[1].strip())\n",
    "                result[\"subsections\"].append([])\n",
    "                result[\"subsection_descriptions\"].append([])\n",
    "        elif line.startswith('### '):\n",
    "            if result[\"subsections\"]:\n",
    "                result[\"subsections\"][-1].append(line[4:].strip())\n",
    "                # Extract the description in the next line\n",
    "                if i + 1 < len(lines) and lines[i + 1].startswith('Description:'):\n",
    "                    result[\"subsection_descriptions\"][-1].append(lines[i + 1].split('Description:', 1)[1].strip())\n",
    "\n",
    "    return result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T05:22:17.562573Z",
     "start_time": "2025-01-14T05:22:17.558049Z"
    }
   },
   "outputs": [],
   "source": [
    "def generate_document(parsed_outline, subsection_contents):\n",
    "    document = []\n",
    "\n",
    "    # Append title\n",
    "    title = parsed_outline['title']\n",
    "    document.append(f\"# {title}\\n\")\n",
    "\n",
    "    # Iterate over sections and their content\n",
    "    for i, section in enumerate(parsed_outline['sections']):\n",
    "        document.append(f\"## {section}\\n\")\n",
    "        # Append subsections and their contents\n",
    "        for j, subsection in enumerate(parsed_outline['subsections'][i]):\n",
    "            document.append(f\"### {subsection}\\n\")\n",
    "            #      document.append(f\"{parsed_outline['subsection_descriptions'][i][j]}\\n\")\n",
    "            # Append detailed content for each subsection\n",
    "            if i < len(subsection_contents) and j < len(subsection_contents[i]):\n",
    "                document.append(subsection_contents[i][j] + \"\\n\")\n",
    "\n",
    "    return \"\\n\".join(document)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T08:01:24.098945Z",
     "start_time": "2025-01-14T08:01:24.093896Z"
    }
   },
   "outputs": [],
   "source": [
    "# def extract_citations(markdown_text):\n",
    "#     # 正则表达式匹配方括号内的内容\n",
    "#     pattern = re.compile(r'\\[(.*?)\\]')\n",
    "#     matches = pattern.findall(markdown_text)\n",
    "#     print(matches)\n",
    "#     # 分割引用，处理多引用情况，并去重\n",
    "#     citations = list()\n",
    "#     for match in matches:\n",
    "#         # 分割各个引用并去除空格\n",
    "#         parts = match.split(';')\n",
    "#         for part in parts:\n",
    "#             cit = part.strip()\n",
    "#             if cit not in citations:\n",
    "#                 citations.append(cit)\n",
    "#     return citations\n",
    "import re\n",
    "\n",
    "def extract_year(text):\n",
    "    match = re.search(r'(19|20)\\d{2}', text)  # 只匹配1900-2099的4位数字\n",
    "    return match.group(0) if match else None\n",
    "\n",
    "def extract_citations(markdown_text):\n",
    "    \"\"\"Collect [citation][chunk-id] pairs from markdown and strip other brackets.\n",
    "\n",
    "    A bracketed span that starts with a letter and is immediately followed\n",
    "    by a purely numeric bracketed span is treated as a (citation, chunk id)\n",
    "    pair; both parts are appended (flattened, citation first) to the\n",
    "    returned list. Citation text is normalised towards\n",
    "    'author, venue, year' when a year can be located. Every other bracketed\n",
    "    span is removed from the text.\n",
    "\n",
    "    Returns:\n",
    "        (filtered_matches, markdown_text): the flattened pair list and the\n",
    "        markdown with non-kept bracketed spans deleted.\n",
    "    \"\"\"\n",
    "    # Match the content inside [ ... ]\n",
    "    pattern = re.compile(r'\\[(.*?)\\]')\n",
    "    # pattern = re.compile(r'<sup>(.*?)</sup>')\n",
    "    matches = pattern.findall(markdown_text)\n",
    "\n",
    "    # Flattened list of the (citation, chunk id) pairs we decide to keep.\n",
    "    filtered_matches = []\n",
    "\n",
    "    # Walk the matches, consuming them in [text][number] pairs.\n",
    "    i = 0\n",
    "    while i < len(matches):\n",
    "        # Candidate citation: bracketed content starting with a letter.\n",
    "        if re.match(r'^[a-zA-Z]', matches[i]):\n",
    "            # It only counts if the very next bracket is a bare number (the chunk id).\n",
    "            if i + 1 < len(matches) and re.match(r'^\\d+$', matches[i + 1]):\n",
    "                # Keep the pair; try to normalise 'author, venue year' into 'author, venue, year'.\n",
    "                paper_infos = matches[i].strip().split(',')\n",
    "                if len(paper_infos) < 3:\n",
    "                    # Fewer than 3 comma parts: pull the year out of the last part.\n",
    "                    year = extract_year(paper_infos[-1])\n",
    "                    if year:\n",
    "                        publish = paper_infos[-1].replace(year, '')\n",
    "                        paper_info = f'{paper_infos[0].strip()}, {publish.strip()}, {year.strip()}'\n",
    "                    else:\n",
    "                        paper_info = matches[i].strip()\n",
    "                    filtered_matches.append(paper_info)\n",
    "                else:\n",
    "                    # 3+ parts: a year embedded in the second part is split out.\n",
    "                    match = re.search(r'(19|20)\\d{2}', paper_infos[1])\n",
    "                    if match:\n",
    "                        publish = paper_infos[1].replace(match.group(0), '')\n",
    "                        paper_info = f'{paper_infos[0].strip()}, {publish.strip()}, {paper_infos[2].strip()}'\n",
    "                        filtered_matches.append(paper_info)\n",
    "                    else:\n",
    "                        filtered_matches.append(matches[i].strip())\n",
    "                filtered_matches.append(matches[i + 1].strip())\n",
    "                i += 2  # skip the next element: it was already consumed as the chunk id\n",
    "            else:\n",
    "                # Letter-initial span not followed by a number: skip both elements.\n",
    "                # NOTE(review): advancing by 2 here can jump over a valid\n",
    "                # citation that immediately follows — confirm this is intended.\n",
    "                i += 2\n",
    "        else:\n",
    "            # Not letter-initial: advance past this single span.\n",
    "            i += 1\n",
    "\n",
    "    # Remove bracketed spans that were not kept from the markdown text.\n",
    "    # NOTE(review): normalised citations differ from their raw `matches`\n",
    "    # text, so their original bracketed form is also stripped here even\n",
    "    # though the pair was kept — verify downstream pattern matching in\n",
    "    # replace_citations_with_numbers still finds them.\n",
    "    for match in matches:\n",
    "        if match not in filtered_matches:\n",
    "            markdown_text = markdown_text.replace(f'[{match}]', '')\n",
    "\n",
    "    return filtered_matches, markdown_text"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T08:28:59.010961Z",
     "start_time": "2025-01-14T08:28:59.003428Z"
    }
   },
   "outputs": [],
   "source": [
    "from tqdm import tqdm\n",
    "\n",
    "\n",
    "def replace_citations_with_numbers(citations, markdown_text):\n",
    "    \"\"\"Replace [title][chunk] citation pairs in markdown_text with numbered\n",
    "    <sup>n</sup> markers and append a numbered References section.\n",
    "\n",
    "    citations is a flat list alternating title and chunk id:\n",
    "    [title1, chunk1, title2, chunk2, ...]. A repeated (title, chunk) pair\n",
    "    reuses the number assigned at its first occurrence.\n",
    "    \"\"\"\n",
    "    # Next reference number to hand out.\n",
    "    counter = 1\n",
    "    # Maps each (title, chunk) pair to the number assigned on first sight.\n",
    "    pair_to_number = {}\n",
    "\n",
    "    references_section = \"\\n\\n## References\\n\\n\"\n",
    "\n",
    "    # Walk the flat list two items at a time: (title, chunk id).\n",
    "    # Stop at len - 1 so a trailing unpaired entry cannot raise IndexError.\n",
    "    for i in range(0, len(citations) - 1, 2):\n",
    "        pair_key = (citations[i], citations[i + 1])\n",
    "\n",
    "        if pair_key in pair_to_number:\n",
    "            # Seen before: reuse the previously assigned number.\n",
    "            replacement_number = pair_to_number[pair_key]\n",
    "        else:\n",
    "            # First occurrence: assign the next number and add a reference entry.\n",
    "            replacement_number = counter\n",
    "            pair_to_number[pair_key] = replacement_number\n",
    "            references_section += f\"[{replacement_number}] {citations[i]}, chunk {citations[i + 1]}\\n\\n\"\n",
    "            counter += 1\n",
    "\n",
    "        # Pattern matching the literal [title][chunk] pair in the text\n",
    "        # (both parts escaped so regex metacharacters in titles are literal).\n",
    "        pattern = re.compile(r'\\[{}\\s*\\]\\[\\s*{}\\]'.format(\n",
    "            re.escape(citations[i]),      # escape the title part\n",
    "            re.escape(citations[i + 1])   # escape the chunk-id part\n",
    "        ))\n",
    "\n",
    "        # Replace every occurrence with the superscript reference number.\n",
    "        markdown_text = pattern.sub(f'<sup>{replacement_number}</sup>', markdown_text)\n",
    "\n",
    "    return markdown_text + references_section"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def lce(topic, outline, contents, res_l, idx):\n",
    "    \"\"\"Refine one subsection with the LCE prompt, using its neighbors as context.\n",
    "\n",
    "    contents is a 3-item sequence [previous, current, following]: the current\n",
    "    subsection (contents[1]) is rewritten by the LLM given the previous and\n",
    "    following subsections plus the overall outline. The reply (with <format>\n",
    "    tags stripped) is stored in res_l[idx] — a shared result list, presumably\n",
    "    so threaded callers can collect outputs by index (TODO confirm) — and the\n",
    "    returned value additionally has the boilerplate reply prefix removed.\n",
    "    \"\"\"\n",
    "    prompt = __generate_prompt(LCE_PROMPT, paras={'OVERALL OUTLINE': outline, 'PREVIOUS': contents[0],\n",
    "                                                  'FOLLOWING': contents[2], 'TOPIC': topic, 'SUBSECTION': contents[1]})\n",
    "    # Strip the <format>...</format> wrapper the model is prompted to emit.\n",
    "    refined_content = zhipu_api(prompt).replace('<format>', '').replace('</format>', '')\n",
    "    #   print(prompt+'\\n---------------------------------\\n'+refined_content)\n",
    "    # NOTE: res_l keeps the version WITH the 'Here is the refined subsection:' prefix.\n",
    "    res_l[idx] = refined_content\n",
    "    return refined_content.replace('Here is the refined subsection:\\n', '')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-13T08:55:20.961454Z",
     "start_time": "2025-01-13T08:55:20.958143Z"
    }
   },
   "outputs": [],
   "source": [
    "# Load the previously generated outline and strip the per-section descriptions.\n",
    "with open('outline_multi-task.txt', 'r') as f:\n",
    "    final_outline = f.read()\n",
    "final_outline_wo_description = remove_descriptions(final_outline)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'# Multi-Task Learning\\n\\n## 1 Multi-Task Learning  \\n\\n### 1.1 Definition and Background  \\n\\n### 1.2 Core Concept  \\n\\n### 1.3 Advantages and Challenges  '"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "final_outline_wo_description"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:52:47.744913Z",
     "start_time": "2025-01-14T02:52:47.741322Z"
    }
   },
   "outputs": [],
   "source": [
    "parsed_outline = parse_outline(outline=final_outline)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'title': 'Multi-Task Learning',\n",
       " 'sections': ['1 Multi-Task Learning'],\n",
       " 'section_descriptions': ['Introduction to multi-task learning'],\n",
       " 'subsections': [['1.1 Definition and Background',\n",
       "   '1.2 Core Concept',\n",
       "   '1.3 Advantages and Challenges']],\n",
       " 'subsection_descriptions': [['Multi-Task Learning (MTL) is a learning paradigm that trains multiple related tasks simultaneously, improving model performance through shared representations.',\n",
       "   'Multi-task learning allows for the sharing of low-level features and facilitates knowledge transfer between tasks.',\n",
       "   'The advantages of multi-task learning include improved model efficiency and enhanced generalization ability. However, challenges exist, such as potential conflicts between tasks, making the design of an appropriate sharing mechanism crucial.']]}"
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "parsed_outline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:53:00.559175Z",
     "start_time": "2025-01-14T02:52:48.605797Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "subsection.txt name: 1.1 Definition and Background\n",
      "subsection.txt name: 1.2 Core Concept\n",
      "subsection.txt name: 1.3 Advantages and Challenges\n"
     ]
    }
   ],
   "source": [
    "topic = \"Multi-Task Learning\"\n",
    "# Per section, gather one entry per subsection: the consolidated RAG text\n",
    "# used for writing, and the ids of the retrieved papers for referencing.\n",
    "section_paper_texts = [[] for _ in range(len(parsed_outline['sections']))]\n",
    "section_references_ids = [[] for _ in range(len(parsed_outline['sections']))]\n",
    "for i in range(len(parsed_outline['sections'])):\n",
    "    for j in range(len(parsed_outline['subsections'][i])):\n",
    "        subsection_name = parsed_outline['subsections'][i][j]\n",
    "        print(f\"subsection.txt name: {subsection_name}\")\n",
    "        # Retrieve chunks relevant to this subsection (helper from rag.py).\n",
    "        query = f\"topic: {topic}. section name: {subsection_name}.\"\n",
    "        simple_rag_result = do_rag_simple(query)\n",
    "        paper_ids = []\n",
    "        for paper in simple_rag_result:\n",
    "            paper_ids.append(paper['paper_id'])\n",
    "        section_references_ids[i].append(paper_ids)\n",
    "        # Merge the retrieved chunks into a single text block for the writer.\n",
    "        subsection_rag_result = consolidate_rag_result(simple_rag_result, kind='simple')\n",
    "        section_paper_texts[i].append(subsection_rag_result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'paper_title: AX-MABSA: A Framework for Extremely Weakly Supervised Multi-label Aspect Based Sentiment Analysis\\npaper_metainfo: Conf_Paper_Meta_Data_EMNLP_2022_Empirical_Methods_in_Natural_Language_Processing_with_whole_text.db\\nchunk_id: 1\\n# 1 Introduction\\nAspect-based sentiment analysis (ABSA) is a wellknown sentiment analysis task which provides more fine-grained information than simple sentiment understanding ( Liu ,2012 ). The main goal of ABSA is to find the aspects and its associated sentiment within a given text. While the works on ABSA have expanded in different directions, it has primarily two sub-tasks, Aspect Term Sentiment Analysis (ATSA) and Aspect Category Sentiment Analysis (ACSA) ( Xue and Li ,2018 ). ATSA consists of different tasks like aspect term extraction (Li et al. ,2018 ;Luo et al. ,2019 ;Li et al. ,2020a ;Shi et al. ,2021 ), aspect term sentiment classification ( He et al. ,2018 ;Chen and Qian ,2019 ;Hou et al. ,2021 ), opinion term extraction ( Dai and Song ,2019 ;He et al. ,2019 ;Chen and Qian ,2020b ), aspect-oriented opinion term extraction ( Fan et al. ,2019 ;Wu et al. ,2020a ), aspect-opinion pair extraction ( Zhao et al. ,2020 ), etc. For example, in the sentence “ The sushi is top-notch, the waiter is attentive, but the atmosphere is dull. \", ATSA would extract the aspect terms ‘ sushi ’, ‘ waiter ’ and ‘ atmosphere ’; opinion terms ‘ top-notch ’, ‘ attentive ’, and ‘dull ’; and their associated sentiments ‘ positive ’, ‘positive ’ and ‘ negative ’. The other sub-task ACSA aims to find the higher order aspect categories and its associated sentiment from a given text. In the above example, ACSA would detect the categories as ‘ food ’ (as ‘pasta’ is a type of ‘food’), ‘ service ’and ‘ ambience ’; and the associated sentiments as ‘positive ’, ‘ positive ’ and ‘ negative ’.  \\n\\nExisting research on ABSA is dominated by supervised methods, where labeled training data is provided ( Chen et al. 
,2017 ;Xue and Li ,2018 ;Cai et al. ,2021 ;Liu et al. ,2021 ;Xu et al. ,2021 ;Yan et al. ,2021 ). A few works try to solve the problem in a weakly/semi-supervised manner, where a few labelled samples are provided ( Wang et al. ,2021a ). However, there has been a lack of study on ABSA using unsupervised methods , i.e., without using any labelled data. A few works also focused on unsupervised aspect term extraction ( Shi et al. ,2021 ). However, such works do not deal with the sentiment associated with the aspects. An existing work on weakly supervised ACSA ( Huang et al. ,2020 ) only considered a single aspect category per sentence – thus limiting the task to a larger extent.  \\n\\nMotivated by the above, in this work, we present a methodology for extremely weakly supervised ACSA task, where we do not need any labelled training samples. We solve both aspect category detection (ACD) and ACSA tasks (on each review sentence) just by using the surface text of aspect category and sentiment. Given $N$ review sentences, $C$ categories of interest and $P$ polarities of interest, the ACD task generates $C$ clusters, while the AC generates $(c_{i},\\\\,p_{j})$ tuples where $c_{i}\\\\in C$ ,and the representation learning perspective, wherein $p_{j}\\\\in P$ ∈. As in ( Wang et al. ,2021b ), we adopt representing sentences by class names leads to better clustering. We only use the surface text of the class names and unlabelled sentences to get aspect category and sentiment clusters.  \\n\\nHowever, in clustering, each review sentence would get only one label, thus limiting the task by a substantial extent. To tackle this, we propose X-MABSA , a multi-label generator model which makes use of dependency parser ( Qi et al. ,2020 )and a similarity-based attention mechanism to generate multiple categories and associated sentiment polarity labels for each review sentence. 
In addition, we find that sometimes the representative text of aspect categories (provided as input) is not present (or sparse) in the text corpus. This might lead to skewed representation of the classes in our framework and thus degrade performance. Therefore, we present an automatic surface word selection strategy which would represent the class names better. We combine this with our X-MABSA model and denote it as AX-MABSA.  \\n\\nWe also showcase that unsupervised posttraining of language model on domain specific data significantly improves the sentence representation and thus achieves better results for ACSA tasks. For this, we post-train BERT language model ( Devlin et al. ,2019 ) using domain specific unlabelled data. We perform experiments on four different benchmark aspect-based datasets ( Pontiki et al. ,2014 ,2015 ,2016 ;Cheng et al. ,2017 ), and compare with different supervised and weakly supervised baselines.  \\n\\nOur main contributions are as follows:  \\n\\n•an extremely weakly supervised method to solve the ACSA task without relying on any labelled data, and using only the class names as the only provided information; •an automatic surface word selection strategy for choosing a suitable word corresponding to each aspect and sentiment class; •use of BERT language model post-training on domain specific unlabelled data for semantic representation of review sentences; •a multi-label generator model which makes use of a dependency parser and a similaritybased attention mechanism for generating multiple aspect-sentiment labels for each sentence; and  \\n\\n•experimental results comparing our architecture with different existing baselines on four benchmark aspect datasets.\\npaper_title: MvP: Multi-view Prompting Improves Aspect Sentiment Tuple Prediction.\\npaper_metainfo: Conf_Paper_Meta_Data_ACL_2023_with_whole_text.db\\nchunk_id: 1\\n# 1 Introduction\\nAspect-based sentiment analysis (ABSA) aims to predict tuples of sentiment elements of interest for 
a given text. There are four sentiment elements that constitute the main line of ABSA research: aspect term $(a)$ , aspect category $(c)$ , opinion term $(o)$ and sentiment polarity ( s) ( Zhang et al. ,2022 ). Given an example sentence, “I love the sushi badly!”, the corresponding elements are “sushi”, “food quality”, “love” and “positive”, respectively. Early studies focus on a single sentiment element like aspect term (Liu et al. ,2015 ;Ma et al. ,2019 ), aspect category (Zhou et al. ,2015 ) or sentiment polarity ( Wang et al. ,2016 ;Chen et al. ,2017 ). Recent works propose compound ABSA tasks involving multiple associated elements, such as aspect sentiment triplet extraction (ASTE) ( Peng et al. ,2020 ), target aspect sentiment detection (TASD) ( Wan et al. ,2020 ), aspect sentiment quad prediction (ASQP) (Zhang et al. ,2021a ) and aspect category opinion sentiment (ACOS) ( Cai et al. ,2020a ). Their target formats are shown in Table 1 .  \\n\\nTable 1: Aspect sentiment tuple prediction tasks with their corresponding outputs. Notably, although both ACOS and ASQP are the most complex quadratic prediction tasks, ACOS focuses on implicit aspects and opinions compared to ASQP. Detailed tasks and dataset statistics are shown in Appendix A .  \\n\\n\\n<html><body><table><tr><td>Task Output</td></tr><tr><td>Aspect Category Opinion Sentiment (ACOS) a,c,o,s AspectSentiment Quad Prediction (ASQP) a, 0.S AspectSentiment Triplet Extraction (ASTE) a, 0,S Target Aspect Sentiment tDetection (TASD) a, C,S</td></tr></table></body></html>  \\n\\nRecently, generative methods have been used to handle various ABSA tasks uniformly and achieved good performance ( Zhang et al. ,2022 ), where the common practice is to generate a sequence of sentiment elements in a specified format to leverage label semantics. To be specific, they use class index ( Yan et al. ,2021 ), sentiment element sequence (Zhang et al. ,2021d ), natural language ( Liu et al. ,2021a ;Zhang et al. 
,2021b ), structured extraction schema ( Lu et al. ,2022b ) or opinion tree ( Bao et al. ,2022 ) as the target of the generation models.  \\n\\nHowever, previous works usually generate the sequence of sentiment elements in a left-to-right fixed order, which ignores the influence of the interdependence of the elements in a sentiment tuple and the diversity of language expression on the targets. For example, the $\\\\ ^{\\\\star}c\\\\Rightarrow s\\\\Rightarrow a\\\\Rightarrow o^{,\\\\flat}$ order in P ARAPHRASE (Zhang et al. ,2021b ) (Figure 1 ). This single-order generation has the following potential drawbacks: (1) Incompleteness, tuple prediction is not naturally a text generation task, the relationship among elements is not ordered but interdependent; (2) Instability, as shown in a study by Hu et al. (2022 ), the performance of different target template orders differs significantly; (3) Error accumulation, the previous prediction errors will be accumulated and affect later predictions.  \\n\\n  \\nFigure 1: Compared with predicting in a single order, M VP proposes element-order prompt learning to control the prediction order of sentiment element. M VP contains three steps: $\\\\textcircled{1}$ permutes multiple elements to form order prompts and constructs an appropriate subset in terms of conditional generation scores; $\\\\circledcirc$ generates multiple sequences consisting of tuples from different views based on the prompt subset. The element order of each tuple accords with the prompt in the input; $\\\\circled{3}$ aggregates the multiple predictions and obtains the final output.  \\n\\nTo address the above challenges, we propose Multiview Prompting (M VP) that aggregates sentiment elements predicted in different orders, leveraging the intuition of solving problems from different views in human reasoning and decision (Stanovich and West ,2000 ). Inspired by prompt chaining ( Liu et al. ,2021b ;Wei et al. ,2022b ;Wang et al. 
,2022b ,a ), M VP introduces element orderbased prompt learning to control the prediction order of sentiment elements, enabling diverse target expressions. Compared to single-order generation, MVP mitigates the incompleteness and instability of a fixed order by receiving information from multiple views, while alleviating the potential error accumulation of generative methods via permutation of elements (Figure 1 ). Besides, M VP is naturally suited for training a single model to solve multiple ABSA tasks as combinations of elements, adaptively enabling knowledge transfer from related  \\n\\ntuple prediction tasks.  \\n\\nWe conduct extensive experiments on main aspect sentiment tuple prediction tasks, including ASQP, ACOS, ASTE and TASD. Empirical results show the superiority of M VP in supervised, lowresource, and cross-task transfer settings. In supervised settings, the single-task and multi-task MVP outperform the state-of-the-art by $1.34\\\\%$ and $1.69\\\\%$ absolute F1 scores on all tasks, respectively. At low resource settings, M VP has sizable improvement over strong baselines, and cross-task transfer brings a more remarkable improvement.  \\n\\nOur major contributions are as follows:  \\n\\n1) We introduce M VP, an element order-based prompt learning method that improves sentiment tuple prediction by aggregating multi-view results.  \\n\\n2) M VP naturally allows us to train a single model simultaneously on all tasks. To the best of our knowledge, the multi-tasking M VP is the first single model that substantially outperforms task-specific models on various ABSA tasks.  
\\n\\n3) Experiments show that M VP significantly advances the state-of-the-art on 10 datasets of 4 tasks and is quite effective in low-resource settings.\\npaper_title: AX-MABSA: A Framework for Extremely Weakly Supervised Multi-label Aspect Based Sentiment Analysis\\npaper_metainfo: Conf_Paper_Meta_Data_EMNLP_2022_Empirical_Methods_in_Natural_Language_Processing_with_whole_text.db\\nchunk_id: 2\\n# 2 Related Work\\nAspect Based Sentiment Analysis (ABSA) has gained significant attention for a long time, and research has been done in primarily two directions – Aspect Term Sentiment Analysis (ATSA) and Aspect Category Sentiment Analysis (ACSA).\\n\\n# 2.1 Aspect Term Sentiment Analysis\\nResearch on ATSA has been in different subcategories like,  \\n\\nAspect Term Extraction In this sub-task, aspect terms associated with a category are extracted from a given text. Prior research on this is based on sequence labelling problem ( Ma et al. ,2019 ;Li et al. ,2020a ). Li and Lam (2017 ) proposed a neural network-based deep multi-task framework with memory network for extracting aspect terms. Xu et al. (2018 ) presented a double embedding method which uses CNN ( LeCun et al. ,1995 )-based sequence tagging, while Li et al. (2018 ) considered summary of opinions expressed in text as well as the history of aspect detection for effective aspect term extraction. Chen and Qian (2020a ) proposed a soft prototype-based approach with aspect word correlations to improve quality. A few unsupervised methods have tried to improve performance by using traditional topic modelling-based models. Luo et al. (2019 ) proposed a neural network based unsupervised model which takes sememes for better lexical semantics. Shi et al. (2021 ) presented a self-supervised method which works on learning aspect embedding on the word embedding space for aspect extraction.  \\n\\nAspect-level Sentiment Classification In this sub-task, sentiment labels are assigned to each aspect term. Wang et al. 
(2016 ); Liu and Zhang (2017 ); Ma et al. (2017 ) proposed an attentionbased neural network model for aspect-level sentiment classification (ASC). Tay et al. (2018 ) modelled relationship between words and aspects using LSTM model ( Hochreiter and Schmidhuber ,1997) to improve ASC performance.He et al.(2018 ) showed that document knowledge transfer improved performance of ASC task. Chen and Qian (2019 ) proposed a transfer capsule network for transferring knowledge from document-level sentiment classification, while Hou et al. (2021 )adopted a dependency tree-based graph neural network to solve the ASC task.  \\n\\nAspect-oriented Opinion Extraction This task extracts opinion terms associated with aspect terms. Fan et al. (2019 ) designed a sequence label model which used LSTM ( Hochreiter and Schmidhuber ,1997 ) for aspect-oriented opinion extraction (AOE). Wu et al. (2020a ) proposed a tagging scheme for AOE task which uses CNN ( LeCun et al. ,1995 ), LSTM ( Hochreiter and Schmidhuber ,1997 ) and BERT ( Devlin et al. ,2019 ) for opinion extraction. Wu et al. (2020b ) proposed a transfer learning method for transferring knowledge from sentiment classification task to AOE task.  \\n\\nRecent works on ATSA have introduced more sub-tasks like aspect-opinion pair extraction, aspect-sentiment-opinion triplet extraction, aspectcategory-opinion-sentiment quadruple extraction, etc. Yan et al. (2021 ) proposed a BART ( Lewis et al. ,2020 ) -based model to solve all ATSA tasks. Cai et al. (2021 ) introduced a new task called, aspect-category-opinion-sentiment quadruple extraction, a BERT ( Devlin et al. ,2019 )-based model to deal with implicit aspects and opinion terms. Xu et al. (2021 ) proposed a new span-level method for the aspect-sentiment-opinion triplet extraction.\\n\\n# 2.2 Aspect Category Sentiment Analysis\\nAspect Category Sentiment Analysis (ACSA) finds aspect categories and their associated sentiments from a text. 
Research on this has been conducted on both Aspect Category Detection (ACD) and ACSA tasks. Ma et al. (2018 ) proposed a word attention-based hierarchical model which takes common-sense knowledge for solving ACSA task. Xue and Li (2018 ) presented a novel CNN ( LeCun et al. ,1995 )-based model for ACSA task. Liang et al. (2019 ) proposed an encoding scheme which was aspect-guided and able to perform aspectreconstruction. Sun et al. (2019 ) constructed an auxiliary text for aspects and reformed the ACSA as a classification task.  \\n\\nWang et al. (2020 ) proposed a novel dependency tree-based model and a relational graph attention network for encoding the sentences. Li et al. (2020b ) designed a multi-instance framework for multi-label ACSA task. Cai et al. (2020 ) reformed the task as sentiment-category with a two-layer hierarchy where the higher layer detected the sentiment while the lower layer detected the aspect category. Liang et al. (2021 ) presented a semisupervised framework having a beta distributionbased model. The model finds semantically related words from the context of a target aspect. Liu et al. (2021 ) solved the ACSA task as a text generative method using BART ( Lewis et al. ,2020 ). Zhang et al. (2021 ) presented aspect sentiment quad prediction task where ACSA was formulated as a paraphrase generation task.  \\n\\nAlmost all existing works on ACSA are based on supervised methods. In contrast, this work proposes a method for ACSA which does not require any labelled data and relies only on seed text for aspect class names.\\npaper_title: OpenAsp: A Benchmark for Multi-document Open Aspect-based Summarization\\npaper_metainfo: Conf_Paper_Meta_Data_EMNLP_2023_with_whole_text.db\\nchunk_id: 1\\n# 3 Task Formulation\\nFollowing prior work ( Ahuja et al. ,2022 ;Yang et al. ,2022 ), given a set of texts about a topic, we define an aspect as a central theme within a topic. The aspect can be referred by certain phrases, denoted aspect labels . 
As an example, Research in Antarctica and Territorial claims are aspect labels of the Antarctica topic (see Figure 1 ).  \\n\\nSimilar to previous work on ABS ( Hayashi et al. ,2021 ;Angelidis et al. ,2021 ), our aspect label is short and concise. In contrast, our aspect definition is open allowing ad-hoc aspects with freeform labels, contrary to having pre-defined domainspecific aspects. Relative to a query in queryfocused summarization (QFS; Dang ,2005 ), which might specify a complex information need, our aspects are restricted to relevant subtopics. ( Hayashi et al. ,2021 ;Angelidis et al. ,2021 ;Angelidis and Lapata ,2018 ).  \\n\\nThe OABS task definition follows previous work (Tan et al. ,2020 ;Yang et al. ,2022 ), and is extended to the multi-document setting as follows: Given a set of documents $D$ on the same topic and an arbitrary aspect label $a$ , the task is to output a short aspect-based summary $S^{a}$ . The summary should consolidate salient information from the document set that is relevant to the aspect.\\n\\n# 4 Annotation Protocol\\nAs emphasized in Section 2 , manually collecting aspect-based summaries is very costly. We propose a novel and cost-effective protocol for generating aspect-based multi-document summaries, executed through controlled crowdsourcing ( Roit et al. ,2020 ) and a specially-designed annotation tool ( Figure 3 in the Appendix). The key idea of our protocol is the extraction of gold aspect-based summaries from generic summaries in existing MDS datasets. Notably, the process is accomplished by reading the generic summary text only, as described below, while saving the strenuous need to read the entire set of source documents and to write the aspect-based summary from scratch.\\n\\n# 4.1 Collecting Aspects and Summaries\\nFrom an existing MDS dataset, we gather pairs consisting of a document set $D$ and a respective generic summary $G$ . 
An annotator reads $G$ and identifies prominent aspects within it, specified by aspect labels $a_{1},a_{2},...,a_{m}$ . For each identified aspect label $a_{i}$ , the annotator selects the relevant sentences from $G$ . The concatenation of these sentences, retaining the original sentence-order from $G$ , produces the corresponding aspect-based summary $S^{a_{i}}$ . Accordingly, we establish $m$ new aspect-based summaries for $D$ as instances for the dataset. Notice that a summary is abstractive with respect to $D$ , being comprised of sentences from the abstractive generic reference summary.  \\n\\nIn our process, we favor extraction of fewer but high quality aspects from a generic summary. Specifically, our protocol instructs annotators to detect the aspects that are central in the generic summary, and to avoid circumstantial aspects. Although our protocol does not exhaustively extract aspects for the topic, the main sub-topics found in the generic summary establish a reliable and sufficient sample of aspects for addressing the multidocument open ABS task, for training and evaluating models. The full annotation guidelines appear in Appendix A .  \\n\\nTable 2: The size of the O PEN A SP dataset splits. “# Topics” denotes the number of document sets, “# Instances” is the total number of aspect-based summaries, and “# Docs” is the total number of source documents.   \\n\\n\\n<html><body><table><tr><td>Split</td><td># Topics</td><td>#Instances</td><td>#DocS</td></tr><tr><td>Test</td><td>192</td><td>596</td><td>6,536</td></tr><tr><td>Valid</td><td>82</td><td>238</td><td>2,168</td></tr><tr><td>Train</td><td>145</td><td>476</td><td>4,878</td></tr></table></body></html>  \\n\\nCritically, the described protocol avoids reading through the full document set and writing text for the summary. Instead, each aspect summary comprises a subset of generic summary sentences. 
We suggest that summary quality is maintained since the extracted summaries are based on dependable generic gold summary sentences. The validity of our protocol is based on two assumptions: (1) the aspect-related sentences extracted from generic summaries cover well the prominent information about the aspect within the full source documentset; (2) the aspect-based summaries preserve the coherence borrowed from the source summaries. We show that these assumptions indeed hold by assessing our collected dataset in Section 6.1 .\\n\\n# 4.2 Curation Phase\\nWe propose an optional curation phase for cleaning the annotated aspect labels and corresponding summaries. The process encompasses a manual review, by an expert, of the aspect label and aspect-based summary only. The reviewer can edit the aspect label, remove irrelevant sentences from the summary, or completely reject the aspect. Similar to the annotation protocol, the curation phase avoids the expensive task of reading the source documents.\\n\\n# 5 The O PEN A SP Dataset\\n\\n# 5.1 Source Data\\nWe exploit 2 prominent MDS datasets that contain reference summaries with at least 200 words to demonstrate our protocol robustness: DUC, a high-quality and expert-annotated dataset, and MultiNews ( Fabbri et al. ,2019 ), with data scraped from newser.com . For MultiNews, we automatically filtered out samples with invalid source documents, to avoid consequential hallucinations in the summaries (see Appendix D.2 ). The large scale of MultiNews allowed further filtration to capture only instances with summaries of 350–880 words, to increase the potential yield of aspect-based summaries. 
For all source data, we excluded documentset instances that discuss topics presented as a list of related events (e.g., daily news briefs or various unrelated incidents of the same kind), since the generic summaries of such instances typically contain few subtopics, if any.\\npaper_title: Aspect-oriented Opinion Alignment Network for Aspect-Based Sentiment Classification\\npaper_metainfo: Conf_Paper_Meta_Data_ECAI_2023_with_whole_text.db\\nchunk_id: 0\\n# Aspect-oriented Opinion Alignment Network for Aspect-Based Sentiment Classification\\nXueyi Liu a , Rui Hou a , Yanglei Gan a , Da Luo a , Changlin Li a , Xiaojun $\\\\mathbf{Shi^{\\\\mathrm{b}}}$ and Qiao Liu a;\\\\* a University of Electronic Science and Technology of China bChina Academy of Electronics and Information Technology Abstract. Aspect-based sentiment classification is a crucial problem in fine-grained sentiment analysis, which aims to predict the sentiment polarity of the given aspect according to its context. Previous works have made remarkable progress in leveraging attention mechanism to extract opinion words for different aspects. However, a persistent challenge is the effective management of semantic mismatches, which stem from attention mechanisms that fall short in adequately aligning opinions words with their corresponding aspect in multi-aspect sentences. To address this issue, we propose a novel Aspect-oriented Opinion Alignment Network (AOAN) to capture the contextual association between opinion words and the corresponding aspect. Specifically, we first introduce a neighboring span enhanced module which highlights various compositions of neighboring words and given aspects. In addition, we design a multiperspective attention mechanism that align relevant opinion information with respect to the given aspect. Extensive experiments on three benchmark datasets demonstrate that our model achieves stateof-the-art results. 
The source code is available at https://github.com/ AONE-NLP/ABSA-AOAN.\\npaper_title: Aspect-oriented Opinion Alignment Network for Aspect-Based Sentiment Classification\\npaper_metainfo: Conf_Paper_Meta_Data_ECAI_2023_with_whole_text.db\\nchunk_id: 2\\n# 2 Related Work\\nAspect-based sentiment classification (ABSC) is a fine-grained sentiment analysis task that focuses on extracting sentiment polarity towards a specific aspect within a given context. Early ABSC methods [17, 18] relied on handcrafted features and failed to capture the intrinsic semantic associations between the given aspect and context.  \\n\\nRecently, various neural network-based approaches, such as Convolutional Neural Networks (CNNs) [13, 19], Recurrent Neural Networks (RNNs) [6, 20], and Memory Networks [7], have been proposed to model the semantic relation between the aspect and context in an implicit way. For instance, Tang et al. [6] introduced two LSTM-based models, namely TD-LSTM and TC-LSTM, which segmented the sentence into three parts: the preceding context words, the aspect, and the following context words. However, RNN-based and its variants methods face challenges in capturing long-distance contextual sentiment features when the aspect is far away from the opinion words, due to the limitation of sequential modeling.  \\n\\nWith this in mind, researchers have deployed attention mechanisms for the aspect-based sentiment classification (ABSC) task to capture long-distance semantic features through global modeling. Tang et al. [7] proposed a Deep Memory Network (MemNet) that utilizes an attention mechanism to explicitly capture the relevance of each contextual word with respect to the aspect and infer the sentiment polarity. However, the inherent defects of attention mechanisms cannot differentiate the correlations of contextual words with respect to the given aspect, leading to a semantic mismatch problem.  
\\n\\nTo tackle the aforementioned issue, some works improved attention mechanisms by modeling a global perspective of sentence-level information [8] or interaction between aspect and context [21, 22]. Liu et al. [8] proposed a content attention-based aspect-based sentiment classification model (Cabasc) that captures crucial information about given aspects from a global perspective. Ma et al. [21] introduced an interactive attention network (IAN) that generates the representations of target and context interactively. Huang et al. [22] proposed an attention over attention networks (AOA) that learns attentions from both aspect-to-text and text-to-aspect, suggesting that opinion words are highly correlated with the aspect. However, they neglected the fact that the position information is also crucial for identifying the sentiment of the aspect.  \\n\\nTaking this into consideration, some researchers have introduced various position information and proximity strategies to improve the effectiveness of aspect-based sentiment classification (ABSC) models. Gu et al. [10] proposed a position-aware bidirectional attention network (PBAN) that gives more attention to neighboring words of the aspect than words with long distances, while Zhou et al. [11] proposed a position-aware hierarchical transfer (PAHT) model that utilizes position information from multiple levels. Chen et al. [12] adopted a proximity strategy that assumes a closer opinion word is more likely to be the actual modifier of the target and designed a recurrent attention network (RAM) to counter irrelevant information using weight decay mechanisms. However, these approaches may not encompass or emphasize all relevant opinion words, limiting the model’s ability to fully comprehend the contextual meaning.  \\n\\nAnother trend of research has explored the use of graph neural networks (GNNs) for modeling syntactic structures of sentence based on dependency trees. For instance, Zhang et al. 
[23] introduced aspect-specific graph convolutional networks (ASGCN) to handle aspect-level sentiment classification tasks. Tian et al. [24] proposed a type-aware graph convolutional network (T-GCN) that utilizes an attentive layer ensemble to learn contextual information from different GCN layers. Li et al. [25] proposed a dual graph convolutional network (DualGCN) that simultaneously took syntax structures and semantic correlations into consideration. Although syntactic-based methods have achieved promising results, the imperfect parsing performance and randomness of input sentences inevitably introduce noise through the dependency tree.  \\n\\nPrior works [10, 12, 13] which take position information into consideration may fall short in scenarios where the opinion words are distant from the aspect or convey complex semantic information. In this paper, we do not rely on syntactic information and focus on the different compositions of aspect neighboring words, which provide comprehensive insights into the sentiment expressed.\\n\\n# 3 Methodology\\n\\n# 3.1 Overview\\nWe describe our model AOAN in this section, which has two main modules shown in Figure 2: a neighboring span enhanced module and a multi-perspective attention module. The neighboring span enhanced module highlights different compositions of neighboring words through multiple neighboring spans. The multi-perspective attention module captures relevant opinion words regarding the given aspect via multi-perspective sentiment representations. We will discuss each component in detail in the following sub-sections.  \\n\\n  \\nFigure 2. 
The overall architecture of AOAN, which is composed primarily of a neighboring span enhanced module highlights neighboring word spans of varying ranges, while a multi-perspective attention module captures relevant opinion words with a comprehensive view.\\n\\n# 3.2 Task Definition\\nThe aim of our model is to predict the sentiment polarity of a given sentence towards a given aspect, based on the contextual information in the sentence. Specifically, let $S=\\\\{w_{1},\\\\ldots,w_{n}\\\\}$ represent a sentence comprising $n$ words, and let $A=\\\\{w_{a+1},\\\\dots,w_{a+m}\\\\}$ denote the aspect mentioned in the sentence, consisting of mwords. Our goal is to accurately predict the sentiment polarity of the sentence $S$ towards the aspect $A$ from the set { positive ,neutral ,negative }.\\npaper_title: Counterfactual-Enhanced Information Bottleneck for Aspect-Based Sentiment Analysis\\npaper_metainfo: Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db\\nchunk_id: 2\\n# Related Work\\n\\n# Aspect-Based Sentiment Analysis\\nAs an essential task in natural language processing, sentiment analysis is commonly studied at document-level or sentence-level, which makes distinguishing sentiment polarities of different aspects in a single document or sentence difficult. To address this limitation, aspect-based sentiment analysis (ABSA), a fine-grained sentiment analysis task, is proposed to identify the sentiment polarity towards a specific aspect within a sentence or document.  \\n\\nSo far, deep neural networks have dominated the literature on ABSA. Earlier approaches centered around devising diverse attention mechanisms to learn attention-based representations of the context and the target aspect, which implicitly captured the semantic relationship between the given aspect and its context (Wang et al. 2016; Ma et al. 2017; Lei et al. 2019). For example, Wang et al. 
(2016) first proposed attention-based LSTMs to capture relevant sentiment information from the context given the target aspect. Ma et al. (2017) introduced an interactive attention to interactively learn the attention-aware representations of the target aspect and its context.  \\n\\nIn another trend, several studies focus on explicitly capturing syntax-aware features for the target aspect by leveraging syntactic knowledge and graph neural networks (Huang and Carley 2019; Wang et al. 2020; Tian, Chen, and Song 2021; Liang et al. 2022). The key idea of these methods involves exploiting the syntactic structures, such as syntax dependency trees, to build graphs. Then, graph convolutional networks (GCNs) (Tian, Chen, and Song 2021; Liang et al. 2022) or graph attention networks (GATs) (Huang and Carley 2019; Wang et al. 2020) can be utilized to aggregate sentiment information from the syntactically adjacent nodes to the target aspect node.  \\n\\nMore recently, the pre-trained language models (PLMs), such as BERT (Devlin et al. 2019) and RoBERTa (Liu et al. 2019), have been applied to ABSA and yielded state-of-theart results (Song et al. 2019; Jiang et al. 2019; Wang et al. 2020; Zhang, Zhou, and Wang 2022). These methods either employed BERT/RoBERTa as an embedding layer to acquire better initial embeddings (Wang et al. 2020; Jiang et al. 2019) or fine-tuned BERT/RoBERTa-based models by incorporating a task-specific classification layer (Xu et al. 2019). They absorbed the merit of rich linguistic and world knowledge contained in PLMs.\\n\\n# Spurious Correlation Reduction in NLP\\nDespite the preliminary success, deep neural networks are notoriously prone to learning spurious correlations between superficial feature patterns and the predicted label. 
For instance, models can achieve promising results in natural language inference (NLI) without capturing the semantic correlations between hypothesis and premises, due to the reliance on specific linguistic patterns in hypothesis (Gururangan et al. 2018) or superficial heuristics between the input text pairs (McCoy, Pavlick, and Linzen 2019). Similar biases have also been revealed in other tasks, including question answering (Jia and Liang 2017) and reading comprehension (Kaushik and Lipton 2018). These models are “right for the wrong reasons”, which results in poor robustness when the data distribution shifts.  \\n\\nExisting solutions to mitigate the spurious correlation problem can be roughly grouped into two categories: (1)  \\n\\n  \\nFigure 2: The overview of our CEIB, encompassing two primary modules: (a) counterfactual data augmentation module that employed LLM to generate the counterfactual data and (b) information bottleneck module with a factual-counterfactual balance setting to learn a more robust ABSA model.  \\n\\ndata augmentation (Zellers et al. 2018; Nie et al. 2020; Wang and Culotta 2021; Wu et al. 2022), and (2) ensemble learning (Clark, Yatskar, and Zettlemoyer 2020; Stacey et al. 2020; Sanh et al. 2021; Tian et al. 2022).  \\n\\nThe key idea of the data augmentation-based methods is to generate adversarial samples without the superficial patterns or spurious associations to alleviate the dataset bias, thus training more robust models. For instance, Zellers et al. (2018) proposed an adversarial filtering method to generate counterfactual samples and filter them in an adversarial way, which reduced spurious stylistic artifacts in the original dataset. Nie et al. (2020) augmented the original training dataset with human-written samples which exposed the model’s brittleness on spurious correlations in an iterative human-in-the-loop manner. 
Wang and Culotta (2021) introduced a dataset de-biasing paradigm from the causaltheoretic perspective, which generated causally counterfactual data to train debiased models.  \\n\\nEnsemble learning-based methods proposed to leverage bias-only models to capture superficial features or shallow patterns presented in the training data, and then train a debiased model with the detected spurious correlations. For instance, Stacey et al. (2020) designed a classifier to learn the biases and discouraged the hypothesis encoder from learning them, which in turn updated the biased classifier in an adversarial learning way. Clark, Yatskar, and Zettlemoyer (2020) leveraged a low-capacity model as the bias-only model to capture simple patterns and down-weighted the corresponding loss to train a more robust model via ensemble learning. Tian et al. (2022) detected the spurious correlations in the training dataset based on the causal inference theories and incorporated a new counterfactual model with the factual model to mitigate the bias.  \\n\\nIn this paper, we reduce spurious correlations for robust ABSA by taking benefits of both the data augmentationbased and ensemble learning-based approaches. We first generate counterfactual data where the spurious correlations do not hold in order to encourage the trained model to capture semantically relevant opinion words for the target aspect. Then, we employ the IB principle to balance the predictive information of the original factual data and the augmented counterfactual data to learn a more robust ABSA model in an ensemble manner.\\npaper_title: A Span-level Bidirectional Network for Aspect Sentiment Triplet Extraction\\npaper_metainfo: Conf_Paper_Meta_Data_EMNLP_2022_Empirical_Methods_in_Natural_Language_Processing_with_whole_text.db\\nchunk_id: 1\\n# 2 Related Work\\nAspect based sentiment analysis (ABSA) is a fine-grained sentiment analysis task that consists of various subtasks, including aspect term extraction (ATE) [Ma et al. 
, 2019], opinion term extraction (OTE) [Wu et al. , 2020], aspect-level sentiment classification (ASC) [Li et al. , 2019b]. Since these subtasks are solved individually, recent studies attempted to couple two subtasks as a compound task, such as aspect term polarity co-extraction (APCE) [Li et al. , 2019a], aspect and opinion co-extraction [Yu et al. , 2019], aspect category and sentiment classification [Hu et al. , 2018], and aspect-opinion pair extraction (AOPE) [Gao et al. , 2021]. Although many works have achieved great progress on these tasks, none of these tasks aims to identify the aspect terms as well as their corresponding opinion term and sentiment polarity.  \\n\\nTo tackle this issue, [Peng et al. , 2020] proposed the aspect sentiment triplet extraction (ASTE) task, which aimed to extract aspect terms, the sentiments of the aspect terms, and the opinion terms causing the sentiments. Some methods [Xu et al. , 2020; Wu et al. , 2020] designed a unified tagging scheme to solve this task. Some others [Chen et al. , 2021; ?] formulated this task as a multi-turn machine reading comprehension task and solved it with machine reading comprehension frameworks. Recently, [Xu et al. , 2021] proposed a span-level model to extract ATs and OTs first and then predict the sentiment relation for each (AT, OT) pair, which suffers from the similar distribution of the representation of the share-token spans and the complexity from exhaustive pairing of every aspect and opinion span candidates.\\n\\n# 3 Methodology\\nAs shown in Figure 2, our SBC framework consists of five parts: task definition, span generation, similar span separation loss, bidirectional cross-attention structure, and inference. 
The details of all parts are given in the following subsections.\\n\\n# 3.1 Task Definition\\nGiven a sentence $S=\\\\{w_{1},w_{2},\\\\ldots,w_{n}\\\\}$ consisting of $n$ words, the goal of the ASTE task is to extract a set of aspect sentiment triplets $\\\\mathcal{T}=\\\\{(a,o,c)_{k}\\\\}_{k=1}^{|\\\\mathcal{T}|}$ from the given sentence $S$ ,where $(a,o,c)$ refers to (aspect term, opinion term, sentiment polarity) and $c\\\\in\\\\{P o s i t i v e,N e u t r a l,N e g a t i v e\\\\}$ .\\n\\n# 3.2 Span Generation\\nGiven a sentence $S$ with $n$ tokens, there are $m$ possible spans in total. Each span $\\\\mathbf{s}_{i}\\\\,=\\\\,\\\\bigl\\\\{w_{s t a r t(i)},\\\\cdot\\\\cdot\\\\cdot,w_{e n d(i)}\\\\bigr\\\\}$ is defined by all the tokens from $s t a r t(i)$ to $e n d(i)$ inclusive, and the maximum length of span $\\\\mathbf{s}_{i}$ is $l_{s}$ :  \\n\\n$$\\n1\\\\leq s t a r t(i)\\\\leq e n d(i)\\\\leq n\\n$$  \\n\\n$$\\ne n d(i)-s t a r t(i)\\\\leq l_{s}\\n$$  \\n\\nTo obtain span representations, we need to get the token-level representations first. In this paper, we utilize BERT [Devlin et al. , 2018] as a sentence encoder to obtain token-level contextualized representations $\\\\{\\\\mathbf{h}_{1},\\\\mathbf{h}_{2},\\\\dotsc,\\\\mathbf{h}_{n}\\\\}$ of the given sentence $S$ . Then, the token-level representations are combined by max pooling. Note that various methods can be applied to generate the representations for spans, the effectiveness of these span generation methods will be investigated in the ablation study in Appendix. We define the representation of span $\\\\mathbf{s}_{i}$ as:  \\n\\n$\\\\mathbf{g}_{i}=M a x\\\\left(\\\\mathbf{h}_{s t a r t}(i),\\\\mathbf{h}_{s t a r t+1}(i),\\\\ldots,\\\\mathbf{h}_{e n d}(i)\\\\right)$ where Max represents max pooling.  \\n\\n  \\nFigure 2: The overall architecture of the span-level bidirectional cross-attention (SBC) framework. 
The ‘Select Span Representation’ means that only the original span representations of aspect candidates and opinion candidates are passed to aspect attention module and opinion attention module, respectively. The blue arrows and modules as well as red arrows and modules indicate the extraction of aspect-to-opinion direction and the opinion-to-aspect direction, respectively.\\n\\n# 3.3 Similar Span Separation Loss\\nAfter generating the representation of span, most previous models directly use the span representations for downstream tasks. However, enumerating all possible spans in a sentence inevitably generates lots of spans that have same tokens with some others, and the model may suffer from the limitations in processing these similar spans due to their adjacent distribution. To separate these spans with similar distributions, we propose a similar span separation loss function based on KL divergence for separating spans with shared tokens, as shown in Figure 2. The similar span separation loss is defined as:  \\n\\n$$\\nK L(\\\\mathbf{g}_{i}||G_{i})=\\\\sum_{j}^{G_{i}}s o f t m a x(\\\\mathbf{g}_{i})l o g\\\\frac{s o f t m a x(\\\\mathbf{g}_{i})}{s o f t m a x(\\\\mathbf{g}_{j})}\\n$$  \\n\\n$$\\nK L(G_{i}||\\\\mathbf{g}_{i})=\\\\sum_{j}^{G_{i}}s o f t m a x(\\\\mathbf{g}_{j})l o g\\\\frac{s o f t m a x(\\\\mathbf{g}_{j})}{s o f t m a x(\\\\mathbf{g}_{i})}\\n$$  \\n\\n$$\\n\\\\mathcal{J}_{K L}=\\\\sum_{i}^{m}l o g(1+\\\\frac{2}{K L(G_{i}||\\\\mathbf{g}_{i})+K L(\\\\mathbf{g}_{i}||G_{i})})\\n$$  \\n\\nwhere $G_{i}$ indicates the set of the representation of spans which share at least one token with $\\\\mathbf{s}_{i}$ .\\n\\n# 3.4 Bidirectional Cross-attention Structure\\nAs the aspect sentiment triplet can be triggered by an AT or an OT, we further design a bidirectional cross-attention structure to decode the span representations. As shown in Figure 2, the bidirectional cross-attention structure consists of an aspect decoder and an opinion decoder. 
The details of each component of bidirectional cross-attention structure are given in the following subsections.\\npaper_title: Aspect-Based Sentiment Analysis with Explicit Sentiment Augmentations\\npaper_metainfo: Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db\\nchunk_id: 0\\n# Aspect-Based Sentiment Analysis with Explicit Sentiment Augmentations\\nJihong Ouyang , Zhiyao $\\\\mathbf{Yang}^{1,2*}$ , Silong Liang , Bing Wang , Yimeng Wang 1 , Ximing $\\\\mathbf{Li}^{1,2\\\\dagger}$  \\n\\n1 College of Computer Science and Technology, Jilin University, China 2 Key Laboratory of Symbolic Computation and Knowledge Engineering of MOE, Jilin University, China {ouyj $@$ , zhiyaoy $^{20\\\\@}$ mails., liangsl23 $@$ mails. }jlu.edu.cn, {wangbing1416, wangyimeng116, liximing86 }@gmail.com\\n\\n# Abstract\\nAspect-based sentiment analysis (ABSA), a fine-grained sentiment classification task, has received much attention recently. Many works investigate sentiment information through opinion words, such as “good” and “bad”. However, implicit sentiment data widely exists in the ABSA dataset, whose sentiment polarity is hard to determine due to the lack of distinct opinion words. To deal with implicit sentiment, this paper proposes an ABSA method that integrates explicit sentiment augmentations (ABSA-ESA) to add more sentiment clues. We propose an ABSA-specific explicit sentiment generation method to create such augmentations. Specifically, we post-train T5 by rule-based data and employ three strategies to constrain the sentiment polarity and aspect term of the generated augmentations. We employ Syntax Distance Weighting and Unlikelihood Contrastive Regularization in the training procedure to guide the model to generate the explicit opinion words with the same polarity as the input sentence. Meanwhile, we utilize the Constrained Beam Search to ensure the augmentations are aspect-related. We test ABSA-ESA on two ABSA benchmarks. 
The results show that ABSA-ESA outperforms the SOTA baselines on implicit and explicit sentiment accuracy.\\n\\n# Introduction\\nA spectbased Sentiment A nalysis ( ABSA ) aims to induce predictive models over manually annotated sentences to identify the sentiment polarity towards each specific aspect term (Wang et al. 2022a; Li et al. 2022). Taking the second sentence in Fig. 1 (a) as an example, the task aims to automatically identify the sentiment polarities of its aspect terms “outside ” ( Negative ) and “ atmosphere ” ( Positive )potentially with the corresponding opinion words “crushed” and “nice”. Due to its popularity, ABSA has been widely applied in many real-world scenarios, and accordingly, it is one of the most significant tasks in the natural language processing community (Yang et al. 2023; Ouyang et al. 2023).  \\n\\nTo handle the task of ABSA, many studies have been investigated during the past decade. Broadly speaking, the focus of recent work is on how to generate more discriminative  \\n\\n(a) 1. The fried rice (Positive) is amazing here . 2. It is crushed at outside (Negative), but the minute you walk inside, it has a nice atmosphere (Positive).   \\n(b) $\\\\vec{1}$ . Our server checked on us maybe twice during the entire meal (Negative). 2. All the money went into the interior decoration (Positive), none of it went to the chefs (Negative).  \\n\\nrepresentations for aspect terms to enhance the identification performance of sentiment polarity. Some early studies generate strong aspect term representations by directly employing deep neural encoders, such as LSTM (Tang et al. 2016; Wang et al. 2016; Cheng et al. 2017) and pre-trained language models (Xu et al. 2020; Dai et al. 2021). Beyond them, to further link the aspect terms and opinion words, some studies build dependency trees of sentences and then generate aspect term representations by employing graph convolution networks (GCN) (Sun et al. 2019; Wang et al. 
2020; Chen, Teng, and Zhang 2020; Li et al. 2021a).  \\n\\nThe success of the GCN-based approach underscores the pivotal role that opinion words play in the realm of ABSA. However, recent research has highlighted a complex scenario characterized by a lack of distinct opinion words, termed ”implicit sentiment” (Li et al. 2021b; Wang et al. 2022b). To delve into this phenomenon, we select four examples from the Rest.14 to compare the implicit and explicit sentiment sentences. In the context of Fig.1(a), the sentiment is discernible due to distinct opinion words. In contrast, as shown in Fig.1(b), unraveling the sentiment associated with aspect terms such as ”meal,” ”interior decoration,” and ”chefs” is challenging. Implicit sentiment is a prevalent occurrence within ABSA datasets and it is hard to deal with (Li et al. 2021b).  \\n\\nTo tackle the challenge mentioned above, in the paper, we design a novel ABSA method by integrating Explicit Sentiment A ugmentations ( ABSA-ESA ). Such augmentations provide more sentiment clues for predicting sentiment polarity. We add them after the corresponding input sentence, forming new ABSA training data. To obtain the augmentations, we design an ABSA-specific explicit sentiment generation method. We aim to generate the sentences explicitly conveying the same sentiment polarity as their corresponding input sentences, targeting the same (or similar) aspect terms . We post-train the generation model T5 (Raffel et al. 2020) by the rule-based data selected in the ABSA dataset, making the generated augmentations comply with the above requirements. Furthermore, we introduce three strategies to confine the generated augmentations about their sentiment polarity and aspect terms. Specifically, in the training procedure, we employ the Syntax Distance Weighting and Unlikelihood Contrastive Regularization to lead the model to generate explicit opinion words with the same polarity as the input sentence. 
Subsequently, when engendering the augmentations, we employ the Constrained Beam Search to ensure the augmentations are aspect-related.  \\n\\nTo sum up, our contributions can be listed as follows:  \\n\\n• We propose a novel ABSA framework named ABSAESA, which focuses on solving the implicit sentiment issue by generating explicit sentiment augmentations.   \\n• We propose an ABSA-specific explicit sentiment generation method that generates augmentations with distinct opinion words for specific aspect terms.   \\n• Empirical results on two ABSA benchmarks show that ABSA-ESA outperforms other methods on both explicit and implicit accuracy.\\npaper_title: Aspect-oriented Opinion Alignment Network for Aspect-Based Sentiment Classification\\npaper_metainfo: Conf_Paper_Meta_Data_ECAI_2023_with_whole_text.db\\nchunk_id: 1\\n# 1 Introduction\\nThe main purpose of aspect-based sentiment classification (ABSC) is to judge the sentiment polarity (positive, negative, neutral) [1, 2] of aspect words in sentences expressing opinions. ABSC is an entitylevel oriented and fine-grained challenge for sentiment analysis. To illustrate, consider the following sample sentence taken from the SemEval 2014 restaurant dataset:  \\n\\nFood is very good, though I occasionally wondered about freshness of raw vegetables in side orders .  \\n\\nIn this sentence, the aspect are \" food \" and \" raw vegetables \", and the expected sentiment polarities of these aspects are intended to be positive and negative. Identifying the sentiment polarity of aspect is crucial for applications [3, 4] such as product review analysis, where understanding customers’ opinions on specific aspects of a product can provide valuable insights for businesses.  \\n\\nTo solve the aspect-based sentiment classification (ABSC) task, it is crucial to establish the semantic relationship between an aspect and its corresponding opinion words. 
Various recurrent neural networks (RNNs) [5, 6] have been proposed to learn representations directly from the left or/and right context with regard to the given aspect. However, prior studies have faced difficulties in accurately establishing semantic associations between aspect and long-distance context on sequential modeling. Therefore, attention mechanisms have been widely adopted in ABSC tasks to model the correlations between aspect and context [7, 8, 9]. Unlike RNN-based models, attention mechanisms possess global modeling capability, which allows them to capture long-distance dependencies between aspect and context. However, attention mechanisms may not be effective when dealing with sentences containing multiple aspects. For instance, the given aspect \" raw vegetables \" may be associated with the opinion words \"very good \" and \" though \" simultaneously. In such cases, attention mechanisms may struggle to align opinion words with their corresponding aspect, resulting in semantic mismatch [8].  \\n\\n  \\nFigure 1. An example sentence contains two aspects but with opposite sentiment polarities from the restaurant reviews.  \\n\\nTo address this issue, various works introduce position information (e.g. fixed size window) [10, 11] and proximity strategy (e.g. position weight decay) [12, 13], which have proved that context words in closer proximity are more likely to be the actual opinion words of the aspect. However, these approaches may not encompass or emphasize all relevant opinion words, limiting the model’s ability to fully comprehend the contextual meaning. As shown in Figure 1, the aspect \"food \" has the opinion words \" very good \" that are relatively close to the aspect and easy to capture its sentiment by the aforementioned methods. 
However, accurately calculating the sentiment of the aspect \"raw vegetables \" requires the capture of a more comprehensive set of semantic information, including the opinion word \" though \", which is much farther away from the aspect. Thus, the way prior works take position information into consideration may fall short in scenarios where the opinion words are distant from the aspect or convey complex semantic information. Therefore, the challenge remains on how to utilize attention mechanisms to accurately capture and match appropriate opinion words with respect to the given aspect .  \\n\\nDrawing on the insights from recent studies on the critical role of different semantic compositionalities which can improve expressive ability and provide syntactic structure for natural language understanding [14], as well as the demonstrated effectiveness of spanlevel information in aspect-based sentiment classification (ABSC) [15, 16], we present a novel Aspect-oriented Opinion Alignment Network (AOAN) for aspect-based sentiment classification. Our proposed model addresses the issue of semantic mismatch by introducing a neighboring span enhanced module to highlights a variety of neighboring words compositions with respect to aspect. These compositions are used to emphasize different ranges of aspect neighboring words, providing flexibility to the contextual association between the aspect and neighboring words. To capture more comprehensive relevant opinion words based on different compositions of neighboring words, we then propose a multi-perspective attention module that utilizes abstract understanding representations to model multiperspective sentiment representations of each aspect. This parallel attention mechanism improves the accuracy and comprehensiveness of capturing the relevant opinion words regarding the given aspect. 
Finally, the multi-perspective sentiment representations are combined by a global average pooling layer, which aggregates the information from all neighboring spans, providing a comprehensive representation of the overall sentiment expressed by the given aspect. The main contributions can be summarized as follows:  \\n\\n•We propose an Aspect-oriented Opinion Alignment Network (AOAN), a novel framework designed to mitigate the semantic mismatch problem. This method is capable of exploiting contextual association of different neighboring spans and guarantees proper alignment between opinion words and the given aspect. •Our proposed AOAN employs a neighboring span enhanced module that highlights various compositions of neighboring words and given aspects, enabling the capture of more evident information related to a given aspect. Additionally, a multi-perspective attention module is designed to align comprehensive opinion information with the given aspect in a parallel way. •We conduct extensive experiments on three benchmark datasets to evaluate the effectiveness of our approach. Experimental results demonstrate that our model outperforms the state-of-the-art methods, which confirms the efficacy of our proposed approach.\\npaper_title: MvP: Multi-view Prompting Improves Aspect Sentiment Tuple Prediction.\\npaper_metainfo: Conf_Paper_Meta_Data_ACL_2023_with_whole_text.db\\nchunk_id: 7\\n# Listing 2: Few-shot Prompt (10 shots) for ASQP (R15).\\nAccording to the following sentiment elements definition:  \\n\\n−The \\'aspect term\\' refers to a specific feature, attribute, or aspect of a product or service that a user may express an opinion about, the aspect term might be \\'null\\' for implicit aspect.  \\n\\n−The \\'opinion term\\' refers to the sentiment or attitude expressed by a user towards a particular aspect or feature of a product or service, the aspect term might be \\'null\\' for implicit opinion.  
\\n\\n−The \\'aspect category\\' refers to the category that aspect belongs to, and the available catgories includes: \\'location general\\', \\' food prices\\', \\'food quality\\', \\'food general\\', \\'ambience general\\', \\'service general\\', \\'restaurant prices\\', \\'drinks prices\\', \\' restaurant miscellaneous\\', \\'drinks quality\\', \\'drinks style_options\\', \\'restaurant general\\' and \\'food style_options\\'.  \\n\\n−The \\'sentiment polarity\\' refers to the degree of positivity, negativity or neutrality expressed in the opinion towards a particular aspect or feature of a product or service, and the available polarities inlcudes: \\'positive\\', \\'negative\\' and \\'neutral\\'.  \\n\\nRecognize all sentiment elements with their corresponding aspect terms, aspect categories, opinion terms and sentiment polarity in the following text with the format of [(\\'aspect term\\', \\'opinion term\\', \\'aspect category\\', \\'sentiment polarity\\'), ...]:  \\n\\nText: never again ! Sentiment Elements: [(\\'null\\', \\'never\\', \\'restaurant general\\', \\'bad\\')] Text: the food was mediocre at best but it was the horrible service that made me vow never to go back . Sentiment Elements: [(\\'food\\', \\'mediocre\\', \\'food quality\\', \\'bad\\'), (\\'service\\', \\'horrible\\', \\'service general\\', \\'bad\\')] Text: we had the lobster sandwich and it was fantastic . Sentiment Elements: [(\\'lobster sandwich\\', \\'fantastic\\', \\'food quality\\', \\'great\\')]  \\n\\nText: they have it all −−great price , food , and service .   \\nSentiment Elements: [(\\'null\\', \\'great\\', \\'restaurant prices\\', \\'great\\'), (\\'food\\', \\'great\\', \\'food quality\\', \\'great\\'), (\\'service\\', \\'great\\', \\'service general\\', \\'great\\')]  \\n\\nText: they even scoop it out nice ( for those on a diet ) not too much not to little . 
Sentiment Elements: [(\\'null\\', \\'nice\\', \\'food style_options\\', \\'great\\')]  \\n\\nText: also it \\'s great to have dinner in a very romantic and comfortable place , the service it \\'s just perfect ... they \\'re so frendly that we never want to live the place !   \\nSentiment Elements: [(\\'place\\', \\'romantic\\', \\'ambience general\\', \\'great\\'), (\\'place\\', \\'comfortable\\', \\'ambience general\\', \\'great\\'), (\\' service\\', \\'perfect\\', \\'service general\\', \\'great\\')]  \\n\\nText: my friend from milan and myself were pleasantly surprised when we arrived and everyone spoke italian . Sentiment Elements: [(\\'null\\', \\'pleasantly surprised\\', \\'restaurant miscellaneous\\', \\'great\\')]  \\n\\nText: i had their eggs benedict for brunch , which were the worst in my entire life , i tried removing the hollondaise sauce completely that was how failed it was .   \\nSentiment Elements: [(\\'eggs benedict\\', \\'worst\\', \\'food quality\\', \\'bad\\')]  \\n\\nText: the food is authentic italian −delicious ! Sentiment Elements: [(\\'food\\', \\'authentic italian\\', \\'food quality\\', \\'great\\'), (\\'food\\', \\'delicious\\', \\'food quality\\', \\'great\\')] Text: a little pricey but it really hits the spot on a sunday morning ! 
Sentiment Elements: [(\\'null\\', \\'pricey\\', \\'restaurant prices\\', \\'bad\\'), (\\'null\\', \\'hits the spot\\', \\'restaurant general\\', \\'great\\')]  \\n\\n<html><body><table><tr><td>Task</td><td>Dataset</td><td>#Cat</td><td>Train Dev (POS/NEU/NEG) (POS/NEU/NEG)</td><td>Test (POS/NEU/NEG)</td></tr><tr><td rowspan=\"2\">ASQP</td><td>Rest15 13</td><td>834 1,005/34/315</td><td>209 252/14/81</td><td>537 453/37/305</td></tr><tr><td>Rest16 13</td><td>1,264 1,369/62/558</td><td>316 341/23/143</td><td>544 584/40/177</td></tr><tr><td rowspan=\"2\">ACOS</td><td>Laptop 121</td><td>2,934 2,583/227/1,364</td><td>326 279/24/137</td><td>816 716/65/380</td></tr><tr><td>Restaurant 13</td><td>1,530 1,656/95/733</td><td>171 180/12/69</td><td>583 668/44/205</td></tr><tr><td rowspan=\"4\">ASTE</td><td>Laptop14</td><td>906 817/126/517</td><td>219 169/36/141</td><td>328 364/63/116</td></tr><tr><td>Rest14</td><td>1,266 1,692/166/480</td><td>310 404/54/119</td><td>492 773/66/155</td></tr><tr><td>Rest15</td><td>605 783/25/205</td><td>148 185/11/53</td><td>322 317/25/143</td></tr><tr><td>Rest16</td><td>857 1,015/50/329</td><td>210 252/11/76</td><td>326 407/29/78</td></tr><tr><td rowspan=\"2\">TASD</td><td>Rest15 13</td><td>1,120 1,198/53/403</td><td>10 6/0/7</td><td>582 454/45/346</td></tr><tr><td>Rest16 13</td><td>1,708 1,657/101/749</td><td>29 23/1/20</td><td>587 611/44/204</td></tr></table></body></html>\\n\\nTable 9: Dataset statistics for various tasks. #Cat refers to the number of aspect categories in the set. 
POS, NEU, and NEG denote the number of positive, neutral and negative quads or triplets respectively.\\npaper_title: A Span-level Bidirectional Network for Aspect Sentiment Triplet Extraction\\npaper_metainfo: Conf_Paper_Meta_Data_EMNLP_2022_Empirical_Methods_in_Natural_Language_Processing_with_whole_text.db\\nchunk_id: 2\\n# Aspect-to-opinion Direction\\nIn aspect-to-opinion direction (Blue arrows and modules in Figure 2), at first, the aspect decoder aims to extract all ATs along with their sentiment from the sentence. We can obtain the confidence score as well as the probability of the sentiment of AT as follows:  \\n\\n$$\\nu_{i}^{a}=F F N N_{a}\\\\left(\\\\mathbf{g}_{i},\\\\theta_{a}\\\\right)\\n$$  \\n\\n$$\\nq_{i}^{a\\\\rightarrow o,a}=\\\\mathbf{w}_{a\\\\rightarrow o,a}u_{i}^{a}\\n$$  \\n\\n$$\\n\\\\mathbf{p}_{i}^{a\\\\rightarrow o,a}=s o f t m a x(q_{i}^{a\\\\rightarrow o,a})\\n$$  \\n\\nwhere $F F N N_{A}$ represents the FFNN of aspect decoder, $\\\\theta_{a}$ is the parameter for the FFNN, $\\\\begin{array}{r l r}{\\\\mathbf{w}_{a\\\\rightarrow o,a}}&{{}\\\\in}&{\\\\mathbb{R}^{m\\\\times c^{*}}}\\\\end{array}$ is a trainable weight vector, and $c^{*}\\\\ \\\\in\\\\ \\\\{P o s i t i v e,N e u t r a l,N e g a t i v e,N u l\\\\bar{l}\\\\}$ of sentiment polarity, ∈{ Null here means that the correspondis the number ing span is not a valid AT.  \\n\\nAfter that, giving a set $G_{a}$ of original span representations of all valid ATs identify all OTs along with their sentiment for each particular $\\\\mathbf{g}_{j}^{a}\\\\,\\\\in\\\\,G_{a}$ ∈, we apply the opinion decoder to valid AT by exploiting attention mechanism. 
Similarly, we obtain the probability distribution of the OT’s sentiment along with its confidence score by:  \\n\\n$$\\nu_{i}^{o}=F F N N_{o}\\\\left(\\\\mathbf{g}_{i},\\\\theta_{o}\\\\right)\\n$$  \\n\\n$$\\n\\\\alpha_{i,j}^{a\\\\rightarrow o}=\\\\frac{\\\\exp(u_{i}^{o})}{\\\\exp(\\\\mathbf{g}_{j}^{a})}\\n$$  \\n\\n$$\\nq_{i,j}^{a\\\\rightarrow o,o}=\\\\mathbf{w}_{a\\\\rightarrow o,o}\\\\left(u_{i}^{o}+\\\\alpha_{i,j}^{a\\\\rightarrow o}\\\\cdot\\\\mathbf{g}_{j}^{a}\\\\right)\\n$$  \\n\\n$$\\n\\\\mathbf{p}_{i,j}^{a\\\\rightarrow o,o}=s o f t m a x(q_{i,j}^{a\\\\rightarrow o,o})\\n$$  \\n\\nwhere $F F N N_{o}$ represents the FFNN of opinion decoder, $\\\\theta_{o}$ is the parameter for the FFNN, $\\\\mathbf{w}_{a\\\\rightarrow o,o}\\\\in\\\\mathbb{R}^{m\\\\times c^{*}}$ is a trainable weight vector. Furthermore, define the loss of aspect-toopinion direction as:  \\n\\n$$\\n\\\\begin{array}{r l r}{\\\\lefteqn{\\\\mathcal{T}_{a\\\\to o}=-\\\\sum_{i}y_{i}^{a\\\\to o,a}\\\\log\\\\big(q_{i}^{a\\\\to o,a}\\\\big)}}\\\\\\\\ &{}&{\\\\quad-\\\\displaystyle\\\\sum_{i}\\\\sum_{j}y_{i,j}^{a\\\\to o,o}\\\\log\\\\big(q_{i,j}^{a\\\\to o,o}\\\\big)}\\\\end{array}\\n$$  \\n\\nwhere $y_{i}^{a\\\\to o,a}$ and $y_{i,j}^{a\\\\rightarrow o,o}$ are ground truth labels of the sentiments for AT and OT given a specific valid AT, respectively.\\n\\n# Opinion-to-aspect Direction\\nAs for opinion-to-aspect direction (Red arrows and modules in Figure 2), the opinion decoder is deployed first to extracts all the OTs along with their sentiment from the sentence. To minimize the number of model parameters, the opinion decoder in both aspect-to-opinion and opinion-to-aspect directions shares the FFNN features, as described in Equation (10). 
The probability distribution of the sentiments of OTs as well as the confidence scores can be obtained as:  \\n\\n$$\\nq_{i}^{o\\\\rightarrow a,o}=\\\\mathbf{w}_{o\\\\rightarrow a,o}u_{i}^{o}\\n$$  \\n\\n$$\\n\\\\mathbf{p}_{i}^{o\\\\rightarrow a,o}=s o f t m a x(q_{i}^{o\\\\rightarrow a,o})\\n$$  \\n\\nwhere $\\\\mathbf{w}_{o\\\\rightarrow a,o}\\\\in\\\\mathbb{R}^{m\\\\times c^{*}}$ is a trainable weight vector.  \\n\\nGiven a set $G_{o}$ if original span representations of all valid OTs $\\\\mathbf{g}_{j}^{o}~\\\\in~G_{o}$ , the aspect decoder is deployed to identify the ATs and their sentiment for each particular valid OTs. Note that the aspect decoder in opinion-to-aspect direction also shares same FFNN features described in Equation (7) with the aspect decoder in aspect-to-opinion direction. The logits of ATs and their confidence scores in opinion-to-aspect direction can be obtained by:  \\n\\n$$\\n\\\\alpha_{i,j}^{o\\\\rightarrow a}=\\\\frac{\\\\exp(u_{i}^{a})}{\\\\exp(\\\\mathbf{g}_{j}^{o})}\\n$$  \\n\\n$$\\nq_{i,j}^{o\\\\to a,a}={\\\\bf w}_{o\\\\to a,a}\\\\left(u_{i}^{a}+\\\\alpha_{i,j}^{o\\\\to a}\\\\cdot{\\\\bf g}_{j}^{o}\\\\right)\\n$$  \\n\\n$$\\n\\\\mathbf{p}_{i,j}^{o\\\\rightarrow a,a}=s o f t m a x(q_{i,j}^{o\\\\rightarrow a,a})\\n$$  \\n\\nwhere $\\\\mathbf{w}_{o\\\\rightarrow a,a}\\\\in\\\\mathbb{R}^{m\\\\times c^{*}}$ is a trainable weight vector.  \\n\\nFinally, the loss for opinion-to-aspect direction is defined as:  \\n\\n$$\\n\\\\begin{array}{l}{{\\\\displaystyle{\\\\mathcal{T}}_{o\\\\rightarrow a}=-\\\\sum_{i}y_{i}^{o\\\\rightarrow a,o}\\\\log\\\\left(q_{i}^{o\\\\rightarrow a,o}\\\\right)}}\\\\\\\\ {{\\\\displaystyle-\\\\sum_{i}\\\\sum_{j}^{G_{o}}y_{i,j}^{o\\\\rightarrow a,a}\\\\log\\\\left(q_{i,j}^{o\\\\rightarrow a,a}\\\\right)}}\\\\end{array}\\n$$  \\n\\nwhere $y_{i}^{o\\\\rightarrow a,o}$ and $y_{i,j}^{o\\\\rightarrow a,a}$ are the ground truth labels. 
Then, we combine the above loss functions to form the loss objective of the entire model:  \\n\\n$$\\n\\\\mathcal{T}=\\\\mathcal{J}_{K L}+\\\\mathcal{J}_{a\\\\rightarrow o}+\\\\mathcal{J}_{o\\\\rightarrow a}\\n$$\\n\\n# 3.5 Inference\\nUse Sto generate all possible span representations $G\\\\,=$ $\\\\{\\\\mathbf{g}_{1},\\\\mathbf{g}_{2},\\\\ldots,\\\\mathbf{g}_{m}\\\\}$  \\n\\nfor $(\\\\beta,\\\\gamma)\\\\in[(a s p e c t,o p i n i o n)$ ∈,(opinion, aspect )] do Input Gto the $\\\\beta$ decoder described in Section 3.4, output the va $\\\\beta$ candidates $\\\\beta_{i}\\\\,\\\\in\\\\,G_{\\\\beta}$ , the cor ponding sentiment $c_{i}^{\\\\beta}$ and the score of each sentiment $q_{i}^{\\\\beta}$ ;for $\\\\beta_{i}\\\\in G_{\\\\beta}^{\\\\,^{\\\\prime}}$ do  \\n\\nInput Gand the span representation $g_{i}^{\\\\beta}$ of valid $\\\\beta$ candidates $\\\\beta_{i}\\\\,\\\\in\\\\,G_{\\\\beta}$ to the $\\\\gamma$ decode utput the valid $\\\\gamma$ candidates $\\\\gamma_{i,j}$ of the given span $\\\\beta_{i}$ , the corresponding sentiment $\\\\dot{c}_{i,j}^{\\\\gamma}$ and the score of each sentiment $q_{i,j}^{\\\\gamma}$ ;  \\n\\nAlgorithm 1 Inference Algorithm for ASTE of the SBC Framework  \\n\\nDuring training, the ground truth of all ATs, OTs, and their corresponding sentiment polarities are already known. Therefore, our model does not form the triplets during the training process. However, in the inference process of each direction, our model identify the triples in a pipeline. For more precise description of the inference process, we propose a inference algorithm to show the determination of the sentiment of each triplet and the combination of the triplet results from two directions. As illustrated in Algorithm 1, the final sentiment polarity of each triplet is determined based on the confidence scores of the corresponding sentiments in AT and OT extraction. 
And the final aspect sentiment triplets are the concatenation of the triplets in both aspect-to-opinion and opinion-to-aspect directions.  \\n\\nelse  \\n\\n$$\\n\\\\tau\\\\leftarrow\\\\tau\\\\cup(\\\\beta_{i},\\\\gamma_{i,j},c)\\n$$\\n\\n# 4 Experiments\\n\\n# 4.1 Datasets\\nTo verify the effectiveness of our proposed SBC framework, we conduct experiments on four benchmark datasets [Xu et al., 2020], which are constructed based on the original SemEval ABSA Challenges and the datasets of [Fan et al., 2019]. Table 1 lists the statistics of these datasets.\\npaper_title: Aspect-Based Sentiment Analysis with Explicit Sentiment Augmentations\\npaper_metainfo: Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db\\nchunk_id: 1\\n# Related Work\\n\\n# Aspect-based Sentiment Analysis\\nAspect-Based Sentiment Analysis (ABSA) methods primarily focus on integrating sentiment information from contextual words into aspect terms. In earlier approaches, this was often achieved by utilizing LSTM or Bi-LSTM as encoders (Tang et al. 2016; Wang et al. 2016; Cheng et al. 2017). Consequently, recent advancements have embraced the Attention mechanism as the preferred encoder (Tang et al. 2016; Wang et al. 2016; Cheng et al. 2017). Notably, leveraging pre-trained language models has emerged as the prevailing trend in ABSA (Xu et al. 2020; Dai et al. 2021). Furthermore, to establish stronger connections between aspect terms and opinion words, numerous studies have delved into constructing dependency trees within sentences and refining aspect term representations using Graph Convolutional Networks (GCNs) (Sun et al. 2019; Wang et al. 2020; Li et al. 2021a).  \\n\\nConcurrently, alongside developing robust encoders, researchers have explored the enrichment of training data to provide external sentiment information for the model (He et al. 2019; Wang et al. 2022a; Yang et al. 2023). These additional data often lack fine-grained annotations and necessitate subsequent data processing. 
Addressing this, this paper integrates ABSA-specific augmentations into ABSA models, bypassing the need for extensive reprocessing.\\n\\n# Implicit Sentiment Analysis\\nImplicit sentiment classification, a pivotal subfield within sentiment analysis, was pioneered by Liu (2012), drawing significant scholarly interest. Initial works revolved around implicit sentiment at the sentence level (Deng, Wiebe, and Choi 2014; Choi, Wiebe, and Mihalcea 2017; Zhou et al. 2021a; Xu et al. 2022). Recent endeavors have shifted towards tackling implicit aspect-based sentiment classification (Li et al. 2021b; Wang et al. 2022b; Fei et al. 2023). A prevailing approach involves incorporating external knowledge to capture sentiment expression patterns. For instance, Xu et al. (2022) integrates external sentiment-related knowledge into sentence features, enhancing the model’s sentiment comprehension. Similarly, Li et al. (2021b) employs a post-training strategy with BERT, leveraging contrastive learning on expansive sentiment-annotated corpora. ABSA-ESA utilizes the data generated by the model instead of obtaining external knowledge.\\n\\n# Data Augmentation\\nWithin NLP, the data augmentation technique has gained substantial traction to expand the pool of available training instances. This approach finds widespread application across diverse domains, including text classification (Wu et al. 2022; Liu et al. 2022; Ouyang et al. 2022), neural machine translation (Lam, Schamoni, and Riezler 2022; Kambhatla, Born, and Sarkar 2022; Gao et al. 2019), and text generation (Bi, Li, and Huang 2021; Xu et al. 2021). Notably, recent strides in ABSA have similarly leveraged data augmentation (Chen, Faulkner, and Badyal 2022; Wang et al. 2022a; Hsu et al. 2021). However, their augmentation techniques tend to be relatively simple, e.g., token replacement, masked aspect prediction, and polarity reversal, limiting the semantic diversity of the enhanced samples. 
The augmentation method in this paper is based on the language model, which generates augmentations with rich sentiment information.\\n\\n# Our Proposed ABSA-ESA Model\\nIn this section, we introduce the proposed ABSA method named ABSA-ESA .\\n\\n# Overall Framework\\nGenerally speaking, ABSA methods take the review sentence $\\\\mathbf{s}\\\\doteq\\\\{s_{j}\\\\}_{j=1}^{M}$ and its corresponding aspect term $\\\\textbf{a}=$ $\\\\{a_{j}\\\\}_{j=1}^{|{\\\\bf a}|}$ as the model input, $M$ denotes the length of all polarity $y\\\\ \\\\in\\\\ \\\\mathcal{Y}\\\\ =$ $\\\\{\\\\mathsf{P o s i t i v e,N e g a t i v e,N e u t r a l}\\\\}$ {}for a . To deal with the sentences containing implicit sentiment, we extend this paradigm by introducing an augmented sentence ${\\\\bf{s}}^{\\\\prime}$ following the initial input s. This augmented sentence contains explicit sentiment tied to the aspect term a . For clarity, we present the comprehensive framework of ABSA-ESA in Figure 2.  \\n\\nTo generate the augmented sentence ${\\\\bf{s}}^{\\\\prime}$ , we propose an ABSA-specific explicit sentiment generation method. We post-train T5 by utilizing $\\\\hat{\\\\mathbf{s}}^{\\\\prime}$ as generation targets selected from the dataset. $\\\\hat{\\\\mathbf{s}}^{\\\\prime}$ has the same (or similar) aspect terms and sentiment as swhile also incorporating explicit sentiment expressions. Additionally, we utilize three strategies to guide the generation concerning sentiment polarity and aspect terms. During the training phase, a Syntax Distance Weighting strategy is implemented to prioritize context words closest to the aspect term in the dependency parse. Furthermore, we also gather $\\\\bar{\\\\mathbf{s}}^{\\\\prime}$ , which has the opposite sentiment of s, for Unlikelihood Contrastive Regularization. It instructs the model about undesirable word choices. 
When generating ${\\\\bf s}^{\\\\prime}$ , we employ Constrained Beam Search to ensure that the aspect term or its similar aspect is included in the augmentations and its context words are the most relevant to a .  \\n\\n  \\nFigure 2: Overall framework of ABSA-ESA.  \\n\\nNext, we introduce the details of the ABSA-specific explicit sentiment generation method.\\n\\n# Training Data Collection\\nTo train the explicit sentiment generation model, the initial step is to gather the training data. Given an input sentence sand its corresponding aspect term a , the generating target $\\\\hat{\\\\mathbf{s}}^{\\\\prime}$ must satisfy the following rules:  \\n\\n• The target sentence should incorporate the same (or similar) aspect term as the input sentence.   \\n• The target sentence should exhibit identical sentiment polarity to the input data.   \\n• The target sentence must contain explicit sentiment expressions .  \\n\\nTo obtain the target sentence $\\\\hat{\\\\mathbf{s}}^{\\\\prime}$ that satisfies the given rules, we begin by aggregating all aspect terms in dataset $\\\\mathcal{D}$ aspect term to construct the aspect term set $\\\\bar{\\\\bf a}_{i}$ is associated with a representation $\\\\bar{\\\\mathcal{A}}~=~\\\\{\\\\bar{\\\\mathbf{a}}_{i}\\\\}_{i=1}^{|\\\\mathcal{A}|}$ $r_{\\\\bar{\\\\mathbf{a}}_{i}}{}^{1}$ . Each acquired by consulting the GloVe embedding table (Pennington, Socher, and Manning 2014). 
Utilizing these represen$\\\\mathcal{R}~=~\\\\{r_{\\\\bar{\\\\mathbf{a}}_{i}}\\\\}_{i=1}^{|A|}$ , we formulate a similarity matrix $\\\\mathbf{C}\\\\,=\\\\,\\\\{c_{i j}\\\\}_{|A|\\\\times|A|}$ tween aspect terms {}|A|×|A| $\\\\bar{\\\\bf a}_{i}$ and here $\\\\bar{\\\\mathbf{a}}_{j}.\\\\,c_{i j}$ $c_{i j}$ represents the similarity beis computed by the cosine distance:  \\n\\n$$\\nc_{i j}=\\\\cos(r_{\\\\bar{\\\\mathbf{a}}_{i}},r_{\\\\bar{\\\\mathbf{a}}_{j}}).\\n$$  \\n\\nWith the similarity matrix $\\\\mathbf{C}$ available, we proceed to the selection of $\\\\hat{\\\\mathbf{s}}^{\\\\prime}$ . According to Li et al. (2021b), the dataset $\\\\mathcal{D}$ $\\\\mathcal{D}_{i}$ Dsions, we choose the can be divided int . As sentences in $\\\\mathcal{D}_{e}$ ˆ$\\\\hat{\\\\mathbf{s}}^{\\\\prime}$ contain explicit sentiment expresplicit subset from this subset to fulfill the third $\\\\mathcal{D}_{e}$ and implicit subset rule above.  \\n\\nWe firs elect $k_{c}$ aspect terms fro $\\\\mathcal{R}$ , which are most similar to a , thereby f ing the set A $\\\\mathcal{A^{\\\\prime}}$ . Subsequently, extract sentences from and share the same sentiment as D$\\\\mathcal{D}_{e}$ containing aspect terms from s. This forms the candidate A target sentence s $\\\\hat{S}_{t}$ rom this set, we randomly choose a target sentence $\\\\hat{\\\\mathbf{s}}^{\\\\prime}\\\\in\\\\hat{S}_{t}$ ∈Sto generate training data $(\\\\mathbf{s},\\\\hat{\\\\mathbf{s}}^{\\\\prime},\\\\mathbf{a})$ with the input sentence and the corresponding aspect term. This process is iterated for all input sentences $\\\\mathbf{s}\\\\in\\\\mathcal{D}$ , resultward, we begin to post-train T5 by ing the final training dataset $\\\\hat{\\\\mathcal{D}}\\\\,=\\\\,\\\\{(\\\\mathbf{s}_{i},\\\\hat{\\\\mathbf{s}}_{i}^{\\\\prime},\\\\mathbf{a}_{i})\\\\}_{i=1}^{N}$ D.}. 
After\\npaper_title: BiSyn-GAT+: Bi-Syntax Aware Graph Attention Network for Aspect-based Sentiment Analysis\\npaper_metainfo: Conf_Paper_Meta_Data_ACL_2022_Annual_Meeting_of_the_Association_for_Computational_Linguistics_with_whole_text.db\\nchunk_id: 0\\n# BiSyn-GAT $^{\\\\mp}$ : Bi-Syntax Aware Graph Attention Network for Aspect-based Sentiment Analysis\\nShuo Liang 1 , Wei Wei , Xian-Ling Mao 3 , Fei Wang 4 , Zhiyong He 5 Cognitive Computing and Intelligent Information Processing (CCIIP) Laboratory, School of Computer Science and Technology, Huazhong University of Science and Technology 3 School of Computer Science and Technology, Beijing Institute of Technology 4 Institute of Computig Technology, Chinese Academy of Sciences 5 Naval University of Engineering 1  ,2  ,3  ,4  ,5\\n\\n# Abstract\\nAspect-based sentiment analysis (ABSA) is a fine-grained sentiment analysis task that aims to align aspects and corresponding sentiments for aspect-specific sentiment polarity inference. It is challenging because a sentence may contain multiple aspects or complicated ( e.g., conditional, coordinating, or adversative) relations. Recently, exploiting dependency syntax information with graph neural networks has been the most popular trend. Despite its success, methods that heavily rely on the dependency tree pose challenges in accurately modeling the alignment of the aspects and their words indicative of sentiment, since the dependency tree may provide noisy signals of unrelated associations ( e.g., the “ conj ” relation between “ great ” and “ dreadful ” in Figure 2 ). In this paper, to alleviate this problem, we propose a Bi -Syn tax aware Graph At tention Network ( BiSyn-GAT+ ). 
Specifically, BiSyn-GAT+ fully exploits the syntax information ( e.g., phrase segmentation and hierarchical structure) of the constituent tree of a sentence to model the sentiment-aware context of every single aspect (called intra -context) and the sentiment relations across aspects (called inter -context) for learning. Experiments on four benchmark datasets demonstrate that BiSyn-GAT $^+$ outperforms the stateof-the-art methods consistently.\\n\\n# 1 Introduction\\nAspect-based sentiment analysis (ABSA) aims to identify the sentiment polarity towards a given aspect in the sentence. Many previous works ( Yang et al. ,2018 ;Li et al. ,2019 ) mainly focus on extracting sequence features via Recurrent Neural Networks (RNNs) or Convolution Neural Networks (CNNs) with attention mechanisms, which often assume that words closer to the target aspect are more likely to be related to its sentiment. However, the assumption might not be valid as exemplified in Figure 1 (a), “service” is obviously closer to “great” rather than “dreadful”, and these methods may assign the irrelevant opinion word “great” to “service” mistakenly.  \\n\\n  \\nFigure 1: Examples of ABSA task. Each underlined aspect is classified to corresponding sentiment polarity.  \\n\\n  \\nFigure 2: Dependency tree of “The food is great but the service and the environment are dreadful”. Two separate ellipses encircle its two clauses. The “conj” edge between “great” and “dreadful” is a noise.  \\n\\nTo mitigate this problem, there already exists several efforts ( Wang et al. ,2020a ;Chen et al. ,2020 )dedicated to research on how to effectively leverage non-sequential information ( e.g., syntactic information like dependency tree) via Graph Neural Networks (GNNs). Generally, a dependency tree ( i.e., Dep.Tree), linking the aspect terms to the syntactically related words, stays valid in the long-distance dependency problem. 
However, the inherent nature of Dep.Tree structure may introduce noise like the unrelated relations across clauses, such as “conj” relation between “great” and “dreadful\" in Figure 2 ,which discourages capturing the sentiment-aware context of each aspect, i.e., intra -context. Moreover, the Dep.Tree structure only reveals relations between words and, thereby, in most cases, is incapable of modeling complicated ( e.g., conditional, coordinating, or adversative) relations of sentences, therefore failing to capture sentiment relations between aspects, i.e., inter -context.  \\n\\n  \\nFigure 3: Constituent tree of the sentence “The food is great but the service and the environment are dreadful”. Context words are in rectangles and parsed phrase types are in rounded rectangles.  \\n\\nHence, in this paper, we consider fully exploiting the syntax information of the constituent tree to tackle the problem. Typically, a constituent tree (i.e., Con.Tree) often contains precise and discriminative phrase segmentation and hierarchical composition structure, which are helpful for correctly aligning the aspects and their corresponding words indicative of sentiment. The former can naturally divide a complicated sentence into multiple clauses, and the latter can discriminate different relations among aspects to infer the sentiment relations of different aspects. We illustrate this with an example in Figure 3 : (1) Clause “The food is great” and the clause “the service and environment are dreadful” are segmented by the phrase segmentation term “but”; (2) In Layer-1, the term “and” indicates the coordinating relation of “service” and “environment”, while the term “but” in Layer-3 reflects the adversative relation towards “food” and “service” (or “environment”).  
\\n\\nThus, to better align aspect terms and corresponding sentiments, we propose a new framework, Bi -Syn tax aware Graph At tention Network (BiSyn-GAT+ ), to effectively leverage the syntax information of constituent tree by modeling intra -context and inter -context information. In particular, BiSyn-GAT $^+$ employs: 1) a syntax graph embedding to encode the intra -context of each aspect based on the fusion syntax information within the same clause in a bottom-up way, which combines the phrase-level syntax information of its constituent tree and the clause-level syntax information of its dependency tree. 2) an aspect-context graph consisting of phrase segmentation terms and all aspects to model the inter -context of each aspect. Specifically, it aggregates the sentiment information of other aspects according to the influence between the current aspect and its neighbor aspects, which is calculated based on aspect representations learned from bi-directional relations over the aspect context graph, respectively.  \\n\\nOur main contributions are as follows:  \\n\\n(1) To the best of our knowledge, this is the first work to exploit syntax information of constituent tree ( e.g., phrase segmentation and hierarchical structure) with GNNs for ABSA. Moreover, it shows superiority in the alignments between aspects and corresponding words indicative of sentiment.  \\n\\n(2) We propose a framework, Bi -Syn tax aware Graph At tention Network ( BiSyn-GAT+ ), to fully leverage syntax information of constituent tree (or, and dependency tree) by modeling the sentimentaware context of each single aspect and the sentiment relations across aspects.  
\\n\\n(3) Extensive experiments on four datasets show that our proposed model achieves state-of-the-art performances.\\npaper_title: Counterfactual-Enhanced Information Bottleneck for Aspect-Based Sentiment Analysis\\npaper_metainfo: Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db\\nchunk_id: 1\\n# Introduction\\nAspect-based sentiment analysis (ABSA), which aims to identify the sentiment of a specific aspect in a sentence, has raised increasing interest in both academic and industrial communities (Zhang et al. 2022). For accurate and stable sentiment prediction in this fine-grained sentiment analysis task, it is essential to capture the context words expressing opinions towards the target aspect.  \\n\\nSo far, deep learning techniques have been predominant in the ABSA task. Deep neural networks can automatically and efficiently learn discriminative contextual representations of both the context and aspect without time-consuming human annotation (Negi and Buitelaar 2014). To model the semantic relationship between the target aspect and its context, various attention mechanisms have been proposed to learn interactive features of the context and aspect (Tang, Qin, and Liu 2016; Ma et al. 2017; Lei et al. 2019). In another line, several works explicitly capture syntax-aware features for the aspect by incorporating the syntactic knowledge with graph neural networks (Huang and Carley 2019; Wang et al. 2020; Liang et al. 2022). More recently, pre-trained language models (PLMs), such as BERT (Devlin et al. 2019), have been applied to the ABSA task, yielding state-of-theart results (Song et al. 2019; Xu et al. 2019; Jiang et al. 2019; Zhang, Zhou, and Wang 2022). Extensive linguistic knowledge learned from the large-scale textual corpus can be utilized to improve the performance for ABSA.  \\n\\n  \\nFigure 1: Spurious correlation between the context words “never had ” and the sentiment label “P OSITIVE ” in the restaurant dataset. 
We use counterfactual data featuring identical spurious context words while different sentiment labels to encourage the model to capture vital opinion words.  \\n\\nDespite the effectiveness of prior studies, few efforts are devoted to mitigating the spurious correlation problem for ABSA. Specifically, deep ABSA models appear to associate superficial patterns with predicted labels, which are held by most training samples but not intrinsic to the ABSA task. For example, as shown in Figure 1, due to the high cooccurrence of the context words “ never had ” and the sentiment label “P OSITIVE ” in the training corpus, deep models tend to learn the strong correlation between the context words “never had” and the label “P OSITIVE ”, rather than capturing the semantically crucial opinion expressions. As a result, models would fail to infer the ground-truth label “N EUTRAL ” for the testing instance, which contains the words “ never had ” without holding this spurious correlation. Under such an inductive bias, models that have achieved promising performance on the in-domain data would suffer from poor robustness against the out-of-distribution or more challenging data. One possible solution to tackle this challenge is to introduce counterfactual data with the similar spurious context words while opposite sentiment labels to motivate the counterfactual thinking (Wang et al. 2022) ability of the ABSA model. In this way, the model can pay more attention to semantically relevant opinion words for the target aspect. In addition, incorporating the original data with the augmented counterfactual data without considering their interactions would even exacerbate the model performance. Thus, it poses a non-trivial challenge to devise a strategy to effectively exploit the interactions between factual and counterfactual data for improving the robustness of the deep ABSA model.  
\\n\\nIn light of this, we propose a CounterfactualEnhanced I nformation Bottleneck framework (called CEIB) to reduce spurious correlations for ABSA, aiming to improve the robustness of the deep ABSA model. The proposed CEIB framework learns a more robust model by taking benefits of both the large language model (LLM) to generate counterfactual data from the original training data and the information bottleneck (IB) principle to model interactions between the original data and augmented data. Specifically, we first devise a multi-pattern prompting method, utilizing LLM to generate high-quality counterfactual samples from the original training samples. Then, we employ the IB principle to discard spurious features from the input while retaining essential predictive information for the sentiment label. To enhance the capacity of CEIB in characterizing adversarial and out-of-distribution data, we separate the mutual information in the original IB objective into factual and counterfactual parts by leveraging the original factual sample and the generated counterfactual sample. By balancing the predictive information of these two parts, we can learn more robust and balanced representations for the ABSA task. The main contributions in this paper can be summarized as follows:  \\n\\n• We propose a novel CEIB framework for robust aspectbased sentiment analysis, which reduces spurious correlations by taking advantage of both the IB principle and counterfactual data augmentation, with the aim of learning a more robust ABSA model.   \\n• We devise a multi-pattern prompting-based method, utilizing LLM to generate high-quality counterfactual data, which are then leveraged to balance the predictive information of the original training data to learn effective and robust representations.   \\n• We conduct extensive experiments on five widely utilized benchmark ABSA datasets. 
Experimental results show that CEIB achieves better prediction and robustness performance compared to the strong competitors.\\npaper_title: StoryER: Automatic Story Evaluation Via Ranking, Rating and Reasoning\\npaper_metainfo: Conf_Paper_Meta_Data_EMNLP_2022_Empirical_Methods_in_Natural_Language_Processing_with_whole_text.db\\nchunk_id: 4\\n# 6.2 Aspect Evaluation\\nWe evaluate our model for predicting confidence scores and ratings for the aspects. For confidence scores, we calculate the recall performance on top$\\\\boldsymbol{\\\\mathrm{k}}$ (i.e., $_{\\\\mathrm{k}=1,3,5}$ ) on the test split of 46K Aspect Rating and Reasoning data to show the percentage of human selected aspects that can be involved within the aspects with top-k confidence. For ratings, we calculate the correlation between human annotation and our model prediction. Table 5 shows the results compared with joint training other two tasks.  \\n\\n<html><body><table><tr><td rowspan=\"2\">ps</td><td rowspan=\"2\">a</td><td rowspan=\"2\">C</td><td rowspan=\"2\">N</td><td colspan=\"3\">Confidence</td><td colspan=\"2\">Rating</td></tr><tr><td>R@1</td><td>R@3</td><td>R@5</td><td>p</td><td>T</td></tr><tr><td></td><td>√</td><td></td><td></td><td>16.06</td><td>46.05</td><td>73.59</td><td>0.190</td><td>0.140</td></tr><tr><td>√</td><td></td><td></td><td></td><td>17.36</td><td>51.59</td><td>76.30</td><td>0.227*</td><td>0.168*</td></tr><tr><td>√</td><td></td><td>人</td><td></td><td>19.94</td><td>52.68</td><td>79.64</td><td>0.248*</td><td>0.185*</td></tr><tr><td>人</td><td>√</td><td>人</td><td>√</td><td>19.88</td><td>51.44</td><td>79.20</td><td>0.216*</td><td>0.161*</td></tr></table></body></html>  \\n\\nTable 5: Evaluation on aspect confidence and rating. $p_{s}$ ,a ,c,$N$ denotes the preference score, aspects, comments and negative samples that are used in training our model respectively.   
\\n\\n\\n<html><body><table><tr><td></td><td></td><td></td><td></td><td colspan=\"3\">Automatic</td><td colspan=\"4\">Human</td></tr><tr><td>Ps</td><td>a</td><td>C</td><td>N</td><td>PPL</td><td>B</td><td>R</td><td>0</td><td>Rel(s)</td><td>Rel(a)</td><td>Rel(r)</td></tr><tr><td></td><td></td><td>√</td><td></td><td>7.31</td><td>8.45</td><td>16.63</td><td>47.61</td><td>73.70</td><td>79.20</td><td></td></tr><tr><td>√</td><td></td><td></td><td></td><td>7.06</td><td>8.60</td><td>16.76</td><td>49.40</td><td>72.93</td><td>82.83</td><td>58.33</td></tr><tr><td>√</td><td>√</td><td>√</td><td>√</td><td>7.95</td><td>8.36</td><td>16.69</td><td>43.45</td><td>68.64</td><td>81.84</td><td>50.49</td></tr></table></body></html>  \\n\\nTable 6: Comment generation evaluation on automatic scores and human evaluation. In human evaluation, the kappa coefficient $\\\\kappa$ for each score are located in 0.4-0.6, indicating a moderate agreement between annotators.  \\n\\nStory ranking and reasoning help the model output more correct confidence and ratings.\\n\\n# 6.3 Comment Evaluation\\nWe evaluate the comment generation with automatic metrics and human evaluation. For automatic scores, we apply Perplexity (PPL), Averaged BLEU1-4 (B), ROUGE (R). For human evaluation, we mainly measure the relativeness between comments with the given story Rel(s) , aspect category Rel(a) and rating score (0-1 negative-positive) $\\\\mathbf{Rel}(\\\\mathbf{r})$ . We also measure Overall (O) quality by calculating the percentage of the comments that are agreed upon by annotators. Each comment is assigned to 5 annotators with a binary choice (i.e., related or not related, agree or not agree). From the result in Table 10 , our generated comments are highly related to the given stories and the aspects. Together with the training on preference score prediction and aspect rating further improve the comment generation performance. 
The results so far show that the preference score, aspects, and comments all benefit one another, illustrating the significance of incorporating aspects and comments into our task.\\n\\n# 7 Discussion\\n\\n# 7.1 Pairwise Evaluation with StoryER\\nGiven a set of prompts, two story generation models can generate stories based on the given prompt. We have two straightforward ways to compare two models using our proposed preference scores: 1) average the preference scores for stories on each model and compare the mean average scores. 2) perform pairwise comparisons for stories from the same prompt and get the preference percentage. We recommend the second method as it strictly follows our pairwise ranking strategy.\\n\\n# 7.2 Domain Transfer in Preference Score\\nTo show the generalization of evaluation metrics, we calculate the averaged predicted preference scores for data from different domains (see Table 7 ). We compute average scores on 1) lowlyvoted (low) and highly-voted stories (high) on both $\\\\mathrm{WP200}$ and $\\\\mathrm{SCARY_{200}}$ , 2) machine-generated stories by LED (LED), and with Plan-and-Write strategy ( Yao et al. ,2019 ) (P&W) trained separately on the highly-upvoted and lowly-upvoted stories, 3) negative stories generated from previous works ( Guan and Huang ,2020 ;Ghazarian et al. ,2021 ), 4) stories from other datasets: fairy tales (short stories), childbook dataset ( Hill et al. ,2015 )and bookcorpus ( Zhu et al. ,2015 ).  \\n\\nAs shown in Table 7 , UNION and MANPLTS consistently produce higher scores for humanwritten stories (Human and Other blocks) while producing lower scores for machine-generated stories (Machine and N blocks). While looking into more details, we can see that they cannot successfully distinguish the story quality, e.g., $\\\\mathrm{SCARY_{200}}($ (low) and $\\\\mathrm{SCARY_{200}}$ (high) receive identical scores. 
These observations strongly indicate that UNION and MANPLTS work well on evaluating coherence but deviate from human preference when evaluating human-written stories.  \\n\\nOur method, on the other hand, is capable of following human preference (Human and Machine block) (also see $\\\\mathrm{SCARY_{200}(l o r}$ w) and $\\\\mathrm{SCARY_{200}(h i g h)}$ as an example). The model trained with highly-voted stories can generate better stories than that trained with lowly-voted stories, and P&W strategy performs even better as proved in many previous works ( Fan et al. ,2019 ;Tan et al. ,2021 ). From the results, our model produces higher scores for LED (high) compared with LED (low) and even higher scores for LED P&W (high), which indicates that our model still follows the human preference on machine-generated stories. As serious coherence problems do not commonly occur in our training data, our method show failure in recognizing manually created incoherent stories (N block). However, our model (Ours (N)) works after we incorporate these stories into our training data, leading to a future direction that unifies the coherence-based and preference-aware metrics. Surprisingly, our model gives relatively low scores when adopting stories from other domains (Other block). We think this is because the writing style changes the criterion of human preference, which misleads our model to predict a not reasonable score, thus leading us to a big challenge in generalizing preference-aware story evaluation.  \\n\\nTable 7: Our model and existing works on various domains of stories. We report the averaged preference score on stories from four different domains.   
\\n\\n\\n<html><body><table><tr><td></td><td>Dataset</td><td>Coherence UNION</td><td>MANPLTS</td><td>Preference Ours</td><td>Hybrid Ours (N)</td></tr><tr><td>an Huma</td><td>WP200(low) WP20o(high) SCARY200(low) SCARY200(high)</td><td>0.771 0.837 0.833 0.895</td><td>0.878 0.948 0.825 0.850</td><td>0.347 0.692 0.355 0.743</td><td>0.655 0.884 0.625 0.883</td></tr><tr><td>lac</td><td>LED (low) LEDP&W(low) LED (high) LEDP&W (high)</td><td>0.687 0.775 0.588 0.760</td><td>0.091 0.300 0.001 0.393</td><td>0.297 0.535 0.409 0.573</td><td>0.290 0.305 0.290 0.308</td></tr><tr><td>Z</td><td>Negative(UNION) Negative(MANPLTS)</td><td>0.360 0.414</td><td>0.003 0.228 0.500</td><td>0.244 0.319 0.233</td><td>0.019 0.027</td></tr><tr><td>Othe</td><td>fairy tale (short) childbook(long) bookcorpus (long)</td><td>0.917 0.886 0.965</td><td>0.915 0.949</td><td>0.318 0.285</td><td>0.482 0.476 0.416</td></tr></table></body></html>\\n'"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect one stored paper text (the output above shows concatenated paper\n",
    "# chunks with title/metainfo/chunk_id fields). Indexing semantics of\n",
    "# section_paper_texts are defined where it is built -- presumably\n",
    "# [section][subsection][paper-batch]; verify against the retrieval cell.\n",
    "section_paper_texts[0][0][0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:53:05.619158Z",
     "start_time": "2025-01-14T02:53:05.615413Z"
    }
   },
   "outputs": [],
   "source": [
    "# Assemble the subsection-writing prompt: fill the [PLACEHOLDER] slots of\n",
    "# SUBSECTION_WRITING_PROMPT with the chosen outline section/subsection and the\n",
    "# retrieved reference-paper text (via __generate_prompt, defined above).\n",
    "# NOTE(review): `section` is taken from sections[0], but `subsection` and\n",
    "# `description` come from index [1][0] and `paper_list` from [0][1][0] --\n",
    "# confirm these all refer to the same subsection (an earlier cell inspects\n",
    "# section_paper_texts[0][0][0], a different index). If subsections[1] belongs\n",
    "# to sections[1], the SECTION NAME slot below is filled with the wrong section.\n",
    "section = parsed_outline['sections'][0]\n",
    "subsection = parsed_outline['subsections'][1][0]\n",
    "description = parsed_outline['subsection_descriptions'][1][0]\n",
    "paper_list = section_paper_texts[0][1][0]\n",
    "subsection_len = 500  # target word count, injected as the 'WORD NUM' slot\n",
    "# citation_num = 5\n",
    "prompt = __generate_prompt(SUBSECTION_WRITING_PROMPT,\n",
    "                           paras={'OVERALL OUTLINE': final_outline_wo_description, 'SUBSECTION NAME': subsection,\n",
    "                                  'DESCRIPTION': description, 'TOPIC': topic, 'PAPER LIST': paper_list,\n",
    "                                  'SECTION NAME': section, 'WORD NUM': str(subsection_len)})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'Aspect Based Sentiment Analysis'"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Survey topic string used for the [TOPIC] prompt slot\n",
    "# (output below: 'Aspect Based Sentiment Analysis').\n",
    "topic"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'\\nYou are an expert in artificial intelligence who wants to write a overall and comprehensive survey about Aspect Based Sentiment Analysis.\\nYou have created a overall outline below:\\n---\\n# Aspect-Based Sentiment Analysis: A Comprehensive Survey\\n\\n## 1 Introduction to Aspect-Based Sentiment Analysis\\n\\n### 1.1 Definition and Importance of Aspect-Based Sentiment Analysis\\n\\n### 1.2 Applications of ABSA in Various Domains\\n\\n### 1.3 ABSA Subtasks and Their Objectives\\n\\n## 2 Evolution of ABSA Techniques\\n\\n### 2.1 Early Methods Based on Handcrafted Features\\n\\n### 2.2 Deep Learning Encoders\\n\\n### 2.3 Attention Mechanisms\\n\\n### 2.4 Pre-trained Language Models\\n\\n### 2.5 Graph Convolutional Networks\\n\\n### 2.6 Data Augmentation Techniques\\n\\n### 2.7 External Knowledge Integration\\n\\n### 2.8 Counterfactual Data Generation\\n\\n### 2.9 Domain-Adaptive Language Modeling\\n\\n## 3 Challenges and Limitations in ABSA\\n\\n### 3.1 Semantic Mismatch\\n\\n### 3.2 Data Scarcity\\n\\n### 3.3 Overfitting\\n\\n### 3.4 Handling Implicit Sentiment\\n\\n### 3.5 Addressing Data Sparsity\\n\\n### 3.6 Mitigating Spurious Correlations\\n\\n## 4 Methods and Techniques in ABSA\\n\\n### 4.1 Attention Mechanisms\\n\\n### 4.2 Position Information\\n\\n### 4.3 Neighboring Span Enhanced Modules\\n\\n### 4.4 Multi-perspective Attention Modules\\n\\n### 4.5 Pseudo-labeling\\n\\n### 4.6 Data Augmentation\\n\\n### 4.7 Knowledge Graph Augmented Networks\\n\\n### 4.8 Bidirectional Cross-attention Frameworks\\n\\n## 5 Knowledge Graph and External Knowledge Integration\\n\\n### 5.1 Knowledge Graph Embeddings for ABSA\\n\\n### 5.2 Integration Strategies for Knowledge Graphs\\n\\n### 5.3 Pre-trained Language Models with Knowledge Graphs\\n\\n### 5.4 Hierarchical Fusion of Multi-view Representations\\n\\n### 5.5 Evaluation and Benchmarking of Knowledge-Enhanced ABSA Models\\n\\n### 5.6 Comparison with State-of-the-Art ABSA Models\\n\\n### 5.7 Future Directions for 
Knowledge Graph Integration in ABSA\\n\\n## 6 Datasets and Evaluation Metrics\\n\\n### 6.1 Popular ABSA Datasets\\n\\n### 6.2 OpenAsp Dataset\\n\\n### 6.3 OABS Datasets\\n\\n### 6.4 Evaluation Metrics\\n\\n### 6.5 Experimental Results and Analysis\\n\\n## 7 Future Directions and Open Challenges\\n\\n### 7.1 Exploration of New Techniques\\n\\n### 7.2 Weak Supervision\\n\\n### 7.3 Counterfactual Data Generation\\n\\n### 7.4 Domain-Adaptive Language Modeling\\n\\n### 7.5 Handling Implicit Sentiment\\n\\n### 7.6 Addressing Spurious Correlation Problem\\n\\n## 8 Ethical Considerations and Broader Impact\\n\\n### 8.1 Bias and Fairness in ABSA\\n\\n### 8.2 Privacy Concerns in ABSA\\n\\n### 8.3 Transparency and Interpretability of ABSA Models\\n\\n### 8.4 Impact of ABSA on Society and Industry\\n\\n### 8.5 Regulatory Considerations and Legal Implications\\n\\n### 8.6 Future Directions in Ethical ABSA Research\\n\\n---\\nBelow are a list of papers for references:\\n---\\npaper_title: AX-MABSA: A Framework for Extremely Weakly Supervised Multi-label Aspect Based Sentiment Analysis\\npaper_metainfo: Conf_Paper_Meta_Data_EMNLP_2022_Empirical_Methods_in_Natural_Language_Processing_with_whole_text.db\\nchunk_id: 1\\n# 1 Introduction\\nAspect-based sentiment analysis (ABSA) is a wellknown sentiment analysis task which provides more fine-grained information than simple sentiment understanding ( Liu ,2012 ). The main goal of ABSA is to find the aspects and its associated sentiment within a given text. While the works on ABSA have expanded in different directions, it has primarily two sub-tasks, Aspect Term Sentiment Analysis (ATSA) and Aspect Category Sentiment Analysis (ACSA) ( Xue and Li ,2018 ). ATSA consists of different tasks like aspect term extraction (Li et al. ,2018 ;Luo et al. ,2019 ;Li et al. ,2020a ;Shi et al. ,2021 ), aspect term sentiment classification ( He et al. ,2018 ;Chen and Qian ,2019 ;Hou et al. ,2021 ), opinion term extraction ( Dai and Song ,2019 ;He et al. 
,2019 ;Chen and Qian ,2020b ), aspect-oriented opinion term extraction ( Fan et al. ,2019 ;Wu et al. ,2020a ), aspect-opinion pair extraction ( Zhao et al. ,2020 ), etc. For example, in the sentence “ The sushi is top-notch, the waiter is attentive, but the atmosphere is dull. \", ATSA would extract the aspect terms ‘ sushi ’, ‘ waiter ’ and ‘ atmosphere ’; opinion terms ‘ top-notch ’, ‘ attentive ’, and ‘dull ’; and their associated sentiments ‘ positive ’, ‘positive ’ and ‘ negative ’. The other sub-task ACSA aims to find the higher order aspect categories and its associated sentiment from a given text. In the above example, ACSA would detect the categories as ‘ food ’ (as ‘pasta’ is a type of ‘food’), ‘ service ’and ‘ ambience ’; and the associated sentiments as ‘positive ’, ‘ positive ’ and ‘ negative ’.  \\n\\nExisting research on ABSA is dominated by supervised methods, where labeled training data is provided ( Chen et al. ,2017 ;Xue and Li ,2018 ;Cai et al. ,2021 ;Liu et al. ,2021 ;Xu et al. ,2021 ;Yan et al. ,2021 ). A few works try to solve the problem in a weakly/semi-supervised manner, where a few labelled samples are provided ( Wang et al. ,2021a ). However, there has been a lack of study on ABSA using unsupervised methods , i.e., without using any labelled data. A few works also focused on unsupervised aspect term extraction ( Shi et al. ,2021 ). However, such works do not deal with the sentiment associated with the aspects. An existing work on weakly supervised ACSA ( Huang et al. ,2020 ) only considered a single aspect category per sentence – thus limiting the task to a larger extent.  \\n\\nMotivated by the above, in this work, we present a methodology for extremely weakly supervised ACSA task, where we do not need any labelled training samples. We solve both aspect category detection (ACD) and ACSA tasks (on each review sentence) just by using the surface text of aspect category and sentiment. 
Given $N$ review sentences, $C$ categories of interest and $P$ polarities of interest, the ACD task generates $C$ clusters, while the AC generates $(c_{i},\\\\,p_{j})$ tuples where $c_{i}\\\\in C$ ,and the representation learning perspective, wherein $p_{j}\\\\in P$ ∈. As in ( Wang et al. ,2021b ), we adopt representing sentences by class names leads to better clustering. We only use the surface text of the class names and unlabelled sentences to get aspect category and sentiment clusters.  \\n\\nHowever, in clustering, each review sentence would get only one label, thus limiting the task by a substantial extent. To tackle this, we propose X-MABSA , a multi-label generator model which makes use of dependency parser ( Qi et al. ,2020 )and a similarity-based attention mechanism to generate multiple categories and associated sentiment polarity labels for each review sentence. In addition, we find that sometimes the representative text of aspect categories (provided as input) is not present (or sparse) in the text corpus. This might lead to skewed representation of the classes in our framework and thus degrade performance. Therefore, we present an automatic surface word selection strategy which would represent the class names better. We combine this with our X-MABSA model and denote it as AX-MABSA.  \\n\\nWe also showcase that unsupervised posttraining of language model on domain specific data significantly improves the sentence representation and thus achieves better results for ACSA tasks. For this, we post-train BERT language model ( Devlin et al. ,2019 ) using domain specific unlabelled data. We perform experiments on four different benchmark aspect-based datasets ( Pontiki et al. ,2014 ,2015 ,2016 ;Cheng et al. ,2017 ), and compare with different supervised and weakly supervised baselines.  
\\n\\nOur main contributions are as follows:  \\n\\n•an extremely weakly supervised method to solve the ACSA task without relying on any labelled data, and using only the class names as the only provided information; •an automatic surface word selection strategy for choosing a suitable word corresponding to each aspect and sentiment class; •use of BERT language model post-training on domain specific unlabelled data for semantic representation of review sentences; •a multi-label generator model which makes use of a dependency parser and a similaritybased attention mechanism for generating multiple aspect-sentiment labels for each sentence; and  \\n\\n•experimental results comparing our architecture with different existing baselines on four benchmark aspect datasets.\\npaper_title: AX-MABSA: A Framework for Extremely Weakly Supervised Multi-label Aspect Based Sentiment Analysis\\npaper_metainfo: Conf_Paper_Meta_Data_EMNLP_2022_Empirical_Methods_in_Natural_Language_Processing_with_whole_text.db\\nchunk_id: 2\\n# 2 Related Work\\nAspect Based Sentiment Analysis (ABSA) has gained significant attention for a long time, and research has been done in primarily two directions – Aspect Term Sentiment Analysis (ATSA) and Aspect Category Sentiment Analysis (ACSA).\\n\\n# 2.1 Aspect Term Sentiment Analysis\\nResearch on ATSA has been in different subcategories like,  \\n\\nAspect Term Extraction In this sub-task, aspect terms associated with a category are extracted from a given text. Prior research on this is based on sequence labelling problem ( Ma et al. ,2019 ;Li et al. ,2020a ). Li and Lam (2017 ) proposed a neural network-based deep multi-task framework with memory network for extracting aspect terms. Xu et al. (2018 ) presented a double embedding method which uses CNN ( LeCun et al. ,1995 )-based sequence tagging, while Li et al. (2018 ) considered summary of opinions expressed in text as well as the history of aspect detection for effective aspect term extraction. 
Chen and Qian (2020a ) proposed a soft prototype-based approach with aspect word correlations to improve quality. A few unsupervised methods have tried to improve performance by using traditional topic modelling-based models. Luo et al. (2019 ) proposed a neural network based unsupervised model which takes sememes for better lexical semantics. Shi et al. (2021 ) presented a self-supervised method which works on learning aspect embedding on the word embedding space for aspect extraction.  \\n\\nAspect-level Sentiment Classification In this sub-task, sentiment labels are assigned to each aspect term. Wang et al. (2016 ); Liu and Zhang (2017 ); Ma et al. (2017 ) proposed an attentionbased neural network model for aspect-level sentiment classification (ASC). Tay et al. (2018 ) modelled relationship between words and aspects using LSTM model ( Hochreiter and Schmidhuber ,1997) to improve ASC performance.He et al.(2018 ) showed that document knowledge transfer improved performance of ASC task. Chen and Qian (2019 ) proposed a transfer capsule network for transferring knowledge from document-level sentiment classification, while Hou et al. (2021 )adopted a dependency tree-based graph neural network to solve the ASC task.  \\n\\nAspect-oriented Opinion Extraction This task extracts opinion terms associated with aspect terms. Fan et al. (2019 ) designed a sequence label model which used LSTM ( Hochreiter and Schmidhuber ,1997 ) for aspect-oriented opinion extraction (AOE). Wu et al. (2020a ) proposed a tagging scheme for AOE task which uses CNN ( LeCun et al. ,1995 ), LSTM ( Hochreiter and Schmidhuber ,1997 ) and BERT ( Devlin et al. ,2019 ) for opinion extraction. Wu et al. (2020b ) proposed a transfer learning method for transferring knowledge from sentiment classification task to AOE task.  
\\n\\nRecent works on ATSA have introduced more sub-tasks like aspect-opinion pair extraction, aspect-sentiment-opinion triplet extraction, aspectcategory-opinion-sentiment quadruple extraction, etc. Yan et al. (2021 ) proposed a BART ( Lewis et al. ,2020 ) -based model to solve all ATSA tasks. Cai et al. (2021 ) introduced a new task called, aspect-category-opinion-sentiment quadruple extraction, a BERT ( Devlin et al. ,2019 )-based model to deal with implicit aspects and opinion terms. Xu et al. (2021 ) proposed a new span-level method for the aspect-sentiment-opinion triplet extraction.\\n\\n# 2.2 Aspect Category Sentiment Analysis\\nAspect Category Sentiment Analysis (ACSA) finds aspect categories and their associated sentiments from a text. Research on this has been conducted on both Aspect Category Detection (ACD) and ACSA tasks. Ma et al. (2018 ) proposed a word attention-based hierarchical model which takes common-sense knowledge for solving ACSA task. Xue and Li (2018 ) presented a novel CNN ( LeCun et al. ,1995 )-based model for ACSA task. Liang et al. (2019 ) proposed an encoding scheme which was aspect-guided and able to perform aspectreconstruction. Sun et al. (2019 ) constructed an auxiliary text for aspects and reformed the ACSA as a classification task.  \\n\\nWang et al. (2020 ) proposed a novel dependency tree-based model and a relational graph attention network for encoding the sentences. Li et al. (2020b ) designed a multi-instance framework for multi-label ACSA task. Cai et al. (2020 ) reformed the task as sentiment-category with a two-layer hierarchy where the higher layer detected the sentiment while the lower layer detected the aspect category. Liang et al. (2021 ) presented a semisupervised framework having a beta distributionbased model. The model finds semantically related words from the context of a target aspect. Liu et al. (2021 ) solved the ACSA task as a text generative method using BART ( Lewis et al. ,2020 ). Zhang et al. 
(2021 ) presented aspect sentiment quad prediction task where ACSA was formulated as a paraphrase generation task.  \\n\\nAlmost all existing works on ACSA are based on supervised methods. In contrast, this work proposes a method for ACSA which does not require any labelled data and relies only on seed text for aspect class names.\\npaper_title: Aspect-Based Sentiment Analysis with Explicit Sentiment Augmentations\\npaper_metainfo: Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db\\nchunk_id: 0\\n# Aspect-Based Sentiment Analysis with Explicit Sentiment Augmentations\\nJihong Ouyang , Zhiyao $\\\\mathbf{Yang}^{1,2*}$ , Silong Liang , Bing Wang , Yimeng Wang 1 , Ximing $\\\\mathbf{Li}^{1,2\\\\dagger}$  \\n\\n1 College of Computer Science and Technology, Jilin University, China 2 Key Laboratory of Symbolic Computation and Knowledge Engineering of MOE, Jilin University, China {ouyj $@$ , zhiyaoy $^{20\\\\@}$ mails., liangsl23 $@$ mails. }jlu.edu.cn, {wangbing1416, wangyimeng116, liximing86 }@gmail.com\\n\\n# Abstract\\nAspect-based sentiment analysis (ABSA), a fine-grained sentiment classification task, has received much attention recently. Many works investigate sentiment information through opinion words, such as “good” and “bad”. However, implicit sentiment data widely exists in the ABSA dataset, whose sentiment polarity is hard to determine due to the lack of distinct opinion words. To deal with implicit sentiment, this paper proposes an ABSA method that integrates explicit sentiment augmentations (ABSA-ESA) to add more sentiment clues. We propose an ABSA-specific explicit sentiment generation method to create such augmentations. Specifically, we post-train T5 by rule-based data and employ three strategies to constrain the sentiment polarity and aspect term of the generated augmentations. 
We employ Syntax Distance Weighting and Unlikelihood Contrastive Regularization in the training procedure to guide the model to generate the explicit opinion words with the same polarity as the input sentence. Meanwhile, we utilize the Constrained Beam Search to ensure the augmentations are aspect-related. We test ABSA-ESA on two ABSA benchmarks. The results show that ABSA-ESA outperforms the SOTA baselines on implicit and explicit sentiment accuracy.\\n\\n# Introduction\\nA spectbased Sentiment A nalysis ( ABSA ) aims to induce predictive models over manually annotated sentences to identify the sentiment polarity towards each specific aspect term (Wang et al. 2022a; Li et al. 2022). Taking the second sentence in Fig. 1 (a) as an example, the task aims to automatically identify the sentiment polarities of its aspect terms “outside ” ( Negative ) and “ atmosphere ” ( Positive )potentially with the corresponding opinion words “crushed” and “nice”. Due to its popularity, ABSA has been widely applied in many real-world scenarios, and accordingly, it is one of the most significant tasks in the natural language processing community (Yang et al. 2023; Ouyang et al. 2023).  \\n\\nTo handle the task of ABSA, many studies have been investigated during the past decade. Broadly speaking, the focus of recent work is on how to generate more discriminative  \\n\\n(a) 1. The fried rice (Positive) is amazing here . 2. It is crushed at outside (Negative), but the minute you walk inside, it has a nice atmosphere (Positive).   \\n(b) $\\\\vec{1}$ . Our server checked on us maybe twice during the entire meal (Negative). 2. All the money went into the interior decoration (Positive), none of it went to the chefs (Negative).  \\n\\nrepresentations for aspect terms to enhance the identification performance of sentiment polarity. Some early studies generate strong aspect term representations by directly employing deep neural encoders, such as LSTM (Tang et al. 2016; Wang et al. 
2016; Cheng et al. 2017) and pre-trained language models (Xu et al. 2020; Dai et al. 2021). Beyond them, to further link the aspect terms and opinion words, some studies build dependency trees of sentences and then generate aspect term representations by employing graph convolution networks (GCN) (Sun et al. 2019; Wang et al. 2020; Chen, Teng, and Zhang 2020; Li et al. 2021a).  \\n\\nThe success of the GCN-based approach underscores the pivotal role that opinion words play in the realm of ABSA. However, recent research has highlighted a complex scenario characterized by a lack of distinct opinion words, termed ”implicit sentiment” (Li et al. 2021b; Wang et al. 2022b). To delve into this phenomenon, we select four examples from the Rest.14 to compare the implicit and explicit sentiment sentences. In the context of Fig.1(a), the sentiment is discernible due to distinct opinion words. In contrast, as shown in Fig.1(b), unraveling the sentiment associated with aspect terms such as ”meal,” ”interior decoration,” and ”chefs” is challenging. Implicit sentiment is a prevalent occurrence within ABSA datasets and it is hard to deal with (Li et al. 2021b).  \\n\\nTo tackle the challenge mentioned above, in the paper, we design a novel ABSA method by integrating Explicit Sentiment A ugmentations ( ABSA-ESA ). Such augmentations provide more sentiment clues for predicting sentiment polarity. We add them after the corresponding input sentence, forming new ABSA training data. To obtain the augmentations, we design an ABSA-specific explicit sentiment generation method. We aim to generate the sentences explicitly conveying the same sentiment polarity as their corresponding input sentences, targeting the same (or similar) aspect terms . We post-train the generation model T5 (Raffel et al. 2020) by the rule-based data selected in the ABSA dataset, making the generated augmentations comply with the above requirements. 
Furthermore, we introduce three strategies to confine the generated augmentations about their sentiment polarity and aspect terms. Specifically, in the training procedure, we employ the Syntax Distance Weighting and Unlikelihood Contrastive Regularization to lead the model to generate explicit opinion words with the same polarity as the input sentence. Subsequently, when engendering the augmentations, we employ the Constrained Beam Search to ensure the augmentations are aspect-related.  \\n\\nTo sum up, our contributions can be listed as follows:  \\n\\n• We propose a novel ABSA framework named ABSAESA, which focuses on solving the implicit sentiment issue by generating explicit sentiment augmentations.   \\n• We propose an ABSA-specific explicit sentiment generation method that generates augmentations with distinct opinion words for specific aspect terms.   \\n• Empirical results on two ABSA benchmarks show that ABSA-ESA outperforms other methods on both explicit and implicit accuracy.\\npaper_title: MvP: Multi-view Prompting Improves Aspect Sentiment Tuple Prediction.\\npaper_metainfo: Conf_Paper_Meta_Data_ACL_2023_with_whole_text.db\\nchunk_id: 1\\n# 1 Introduction\\nAspect-based sentiment analysis (ABSA) aims to predict tuples of sentiment elements of interest for a given text. There are four sentiment elements that constitute the main line of ABSA research: aspect term $(a)$ , aspect category $(c)$ , opinion term $(o)$ and sentiment polarity ( s) ( Zhang et al. ,2022 ). Given an example sentence, “I love the sushi badly!”, the corresponding elements are “sushi”, “food quality”, “love” and “positive”, respectively. Early studies focus on a single sentiment element like aspect term (Liu et al. ,2015 ;Ma et al. ,2019 ), aspect category (Zhou et al. ,2015 ) or sentiment polarity ( Wang et al. ,2016 ;Chen et al. ,2017 ). Recent works propose compound ABSA tasks involving multiple associated elements, such as aspect sentiment triplet extraction (ASTE) ( Peng et al. 
,2020 ), target aspect sentiment detection (TASD) ( Wan et al. ,2020 ), aspect sentiment quad prediction (ASQP) (Zhang et al. ,2021a ) and aspect category opinion sentiment (ACOS) ( Cai et al. ,2020a ). Their target formats are shown in Table 1 .  \\n\\nTable 1: Aspect sentiment tuple prediction tasks with their corresponding outputs. Notably, although both ACOS and ASQP are the most complex quadratic prediction tasks, ACOS focuses on implicit aspects and opinions compared to ASQP. Detailed tasks and dataset statistics are shown in Appendix A .  \\n\\n\\n<html><body><table><tr><td>Task Output</td></tr><tr><td>Aspect Category Opinion Sentiment (ACOS) a,c,o,s AspectSentiment Quad Prediction (ASQP) a, 0.S AspectSentiment Triplet Extraction (ASTE) a, 0,S Target Aspect Sentiment tDetection (TASD) a, C,S</td></tr></table></body></html>  \\n\\nRecently, generative methods have been used to handle various ABSA tasks uniformly and achieved good performance ( Zhang et al. ,2022 ), where the common practice is to generate a sequence of sentiment elements in a specified format to leverage label semantics. To be specific, they use class index ( Yan et al. ,2021 ), sentiment element sequence (Zhang et al. ,2021d ), natural language ( Liu et al. ,2021a ;Zhang et al. ,2021b ), structured extraction schema ( Lu et al. ,2022b ) or opinion tree ( Bao et al. ,2022 ) as the target of the generation models.  \\n\\nHowever, previous works usually generate the sequence of sentiment elements in a left-to-right fixed order, which ignores the influence of the interdependence of the elements in a sentiment tuple and the diversity of language expression on the targets. For example, the $\\\\ ^{\\\\star}c\\\\Rightarrow s\\\\Rightarrow a\\\\Rightarrow o^{,\\\\flat}$ order in P ARAPHRASE (Zhang et al. ,2021b ) (Figure 1 ). 
This single-order generation has the following potential drawbacks: (1) Incompleteness, tuple prediction is not naturally a text generation task, the relationship among elements is not ordered but interdependent; (2) Instability, as shown in a study by Hu et al. (2022 ), the performance of different target template orders differs significantly; (3) Error accumulation, the previous prediction errors will be accumulated and affect later predictions.  \\n\\n  \\nFigure 1: Compared with predicting in a single order, M VP proposes element-order prompt learning to control the prediction order of sentiment element. M VP contains three steps: $\\\\textcircled{1}$ permutes multiple elements to form order prompts and constructs an appropriate subset in terms of conditional generation scores; $\\\\circledcirc$ generates multiple sequences consisting of tuples from different views based on the prompt subset. The element order of each tuple accords with the prompt in the input; $\\\\circled{3}$ aggregates the multiple predictions and obtains the final output.  \\n\\nTo address the above challenges, we propose Multiview Prompting (M VP) that aggregates sentiment elements predicted in different orders, leveraging the intuition of solving problems from different views in human reasoning and decision (Stanovich and West ,2000 ). Inspired by prompt chaining ( Liu et al. ,2021b ;Wei et al. ,2022b ;Wang et al. ,2022b ,a ), M VP introduces element orderbased prompt learning to control the prediction order of sentiment elements, enabling diverse target expressions. Compared to single-order generation, MVP mitigates the incompleteness and instability of a fixed order by receiving information from multiple views, while alleviating the potential error accumulation of generative methods via permutation of elements (Figure 1 ). 
Besides, MVP is naturally suited for training a single model to solve multiple ABSA tasks as combinations of elements, adaptively enabling knowledge transfer from related tuple prediction tasks.  \n\nWe conduct extensive experiments on main aspect sentiment tuple prediction tasks, including ASQP, ACOS, ASTE and TASD. Empirical results show the superiority of MVP in supervised, low-resource, and cross-task transfer settings. In supervised settings, the single-task and multi-task MVP outperform the state-of-the-art by $1.34\\%$ and $1.69\\%$ absolute F1 scores on all tasks, respectively. In low-resource settings, MVP has sizable improvement over strong baselines, and cross-task transfer brings a more remarkable improvement.  \n\nOur major contributions are as follows:  \n\n1) We introduce MVP, an element order-based prompt learning method that improves sentiment tuple prediction by aggregating multi-view results.  \n\n2) MVP naturally allows us to train a single model simultaneously on all tasks. To the best of our knowledge, the multi-tasking MVP is the first single model that substantially outperforms task-specific models on various ABSA tasks.  \n\n3) Experiments show that MVP significantly advances the state-of-the-art on 10 datasets of 4 tasks and is quite effective in low-resource settings.\npaper_title: Counterfactual-Enhanced Information Bottleneck for Aspect-Based Sentiment Analysis\npaper_metainfo: Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db\nchunk_id: 2\n# Related Work\n\n# Aspect-Based Sentiment Analysis\nAs an essential task in natural language processing, sentiment analysis is commonly studied at document-level or sentence-level, which makes distinguishing sentiment polarities of different aspects in a single document or sentence difficult. 
To address this limitation, aspect-based sentiment analysis (ABSA), a fine-grained sentiment analysis task, is proposed to identify the sentiment polarity towards a specific aspect within a sentence or document.  \\n\\nSo far, deep neural networks have dominated the literature on ABSA. Earlier approaches centered around devising diverse attention mechanisms to learn attention-based representations of the context and the target aspect, which implicitly captured the semantic relationship between the given aspect and its context (Wang et al. 2016; Ma et al. 2017; Lei et al. 2019). For example, Wang et al. (2016) first proposed attention-based LSTMs to capture relevant sentiment information from the context given the target aspect. Ma et al. (2017) introduced an interactive attention to interactively learn the attention-aware representations of the target aspect and its context.  \\n\\nIn another trend, several studies focus on explicitly capturing syntax-aware features for the target aspect by leveraging syntactic knowledge and graph neural networks (Huang and Carley 2019; Wang et al. 2020; Tian, Chen, and Song 2021; Liang et al. 2022). The key idea of these methods involves exploiting the syntactic structures, such as syntax dependency trees, to build graphs. Then, graph convolutional networks (GCNs) (Tian, Chen, and Song 2021; Liang et al. 2022) or graph attention networks (GATs) (Huang and Carley 2019; Wang et al. 2020) can be utilized to aggregate sentiment information from the syntactically adjacent nodes to the target aspect node.  \\n\\nMore recently, the pre-trained language models (PLMs), such as BERT (Devlin et al. 2019) and RoBERTa (Liu et al. 2019), have been applied to ABSA and yielded state-of-theart results (Song et al. 2019; Jiang et al. 2019; Wang et al. 2020; Zhang, Zhou, and Wang 2022). These methods either employed BERT/RoBERTa as an embedding layer to acquire better initial embeddings (Wang et al. 2020; Jiang et al. 
2019) or fine-tuned BERT/RoBERTa-based models by incorporating a task-specific classification layer (Xu et al. 2019). They absorbed the merit of rich linguistic and world knowledge contained in PLMs.\\n\\n# Spurious Correlation Reduction in NLP\\nDespite the preliminary success, deep neural networks are notoriously prone to learning spurious correlations between superficial feature patterns and the predicted label. For instance, models can achieve promising results in natural language inference (NLI) without capturing the semantic correlations between hypothesis and premises, due to the reliance on specific linguistic patterns in hypothesis (Gururangan et al. 2018) or superficial heuristics between the input text pairs (McCoy, Pavlick, and Linzen 2019). Similar biases have also been revealed in other tasks, including question answering (Jia and Liang 2017) and reading comprehension (Kaushik and Lipton 2018). These models are “right for the wrong reasons”, which results in poor robustness when the data distribution shifts.  \\n\\nExisting solutions to mitigate the spurious correlation problem can be roughly grouped into two categories: (1)  \\n\\n  \\nFigure 2: The overview of our CEIB, encompassing two primary modules: (a) counterfactual data augmentation module that employed LLM to generate the counterfactual data and (b) information bottleneck module with a factual-counterfactual balance setting to learn a more robust ABSA model.  \\n\\ndata augmentation (Zellers et al. 2018; Nie et al. 2020; Wang and Culotta 2021; Wu et al. 2022), and (2) ensemble learning (Clark, Yatskar, and Zettlemoyer 2020; Stacey et al. 2020; Sanh et al. 2021; Tian et al. 2022).  \\n\\nThe key idea of the data augmentation-based methods is to generate adversarial samples without the superficial patterns or spurious associations to alleviate the dataset bias, thus training more robust models. For instance, Zellers et al. 
(2018) proposed an adversarial filtering method to generate counterfactual samples and filter them in an adversarial way, which reduced spurious stylistic artifacts in the original dataset. Nie et al. (2020) augmented the original training dataset with human-written samples which exposed the model’s brittleness on spurious correlations in an iterative human-in-the-loop manner. Wang and Culotta (2021) introduced a dataset de-biasing paradigm from the causaltheoretic perspective, which generated causally counterfactual data to train debiased models.  \\n\\nEnsemble learning-based methods proposed to leverage bias-only models to capture superficial features or shallow patterns presented in the training data, and then train a debiased model with the detected spurious correlations. For instance, Stacey et al. (2020) designed a classifier to learn the biases and discouraged the hypothesis encoder from learning them, which in turn updated the biased classifier in an adversarial learning way. Clark, Yatskar, and Zettlemoyer (2020) leveraged a low-capacity model as the bias-only model to capture simple patterns and down-weighted the corresponding loss to train a more robust model via ensemble learning. Tian et al. (2022) detected the spurious correlations in the training dataset based on the causal inference theories and incorporated a new counterfactual model with the factual model to mitigate the bias.  \\n\\nIn this paper, we reduce spurious correlations for robust ABSA by taking benefits of both the data augmentationbased and ensemble learning-based approaches. We first generate counterfactual data where the spurious correlations do not hold in order to encourage the trained model to capture semantically relevant opinion words for the target aspect. 
Then, we employ the IB principle to balance the predictive information of the original factual data and the augmented counterfactual data to learn a more robust ABSA model in an ensemble manner.\\npaper_title: Cross-Domain Data Augmentation with Domain-Adaptive Language Modeling for Aspect-Based Sentiment Analysis.\\npaper_metainfo: Conf_Paper_Meta_Data_ACL_2023_with_whole_text.db\\nchunk_id: 1\\n# 2 Related Work\\n\\n# 2.1 Aspect-Based Sentiment Analysis (ABSA)\\nAs an important task in sentiment analysis, ABSA has been extensively studied in the last decade. Earlier works mainly focus on two subtasks of ABSA, i.e., aspect extraction (AE) ( Liu et al. ,2015 ;Chen and Qian ,2020a ) and aspect-based sentiment classification (ASC) ( Zhang et al. ,2016 ;Chen et al. ,2017 ;Sun et al. ,2019 ;Wang et al. ,2020 ). Recently, many supervised methods are proposed to solve the two sub-tasks in an end-to-end manner, which either resort to multi-task learning to exploit the relations between AE and ASC ( Luo et al. ,2019 ;He et al. ,2019 ;Chen and Qian ,2020b ) or employ a collapsed tagging scheme to combine AE and ASC into a unified label space and formulate the task as a sequence labeling problem ( Wang et al. ,2018 ;Li et al. ,2019a ,b). Despite obtaining promising results on several benchmark datasets, these methods suffer from the lack of annotated data in many emerging domains. To alleviate this issue, we aim to propose an unsupervised domain adaptation method to generate sufficient labeled data for ABSA in any target domain.\\n\\n# 2.2 Unsupervised Domain Adaptation\\nIn the literature, a myriad of unsupervised domain adaptation methods have been proposed for coarsegrained sentiment analysis ( Zhuang et al. ,2020 ), including pivot-based methods ( Blitzer et al. ,2007 ;Yu and Jiang ,2016 ;Ziser and Reichart ,2018 ;Xi et al. ,2020 ), auto-encoders ( Glorot et al. ,2011 ;Zhou et al. ,2016 ), domain adversarial networks (Ganin and Lempitsky ,2015 ;Ganin et al. 
,2016 ;Li et al. ,2018 ), and semi-supervised methods ( He et al. ,2018 ;Ye et al. ,2020 ). These methods primarily focus on learning domain-invariant representations to alleviate the distribution discrepancy across domains. Inspired by the success of these representation-based methods, a few recent studies have adapted them to the cross-domain ABSA task, in which the key idea is to learn a shared representation for each word or aspect term across domains ( Ding et al. ,2017 ;Wang and Pan ,2018 ,2019 ,2020 ;Li et al. ,2019c ;Zeng et al. ,2022 ;Chen and Qian ,2022 ). Moreover, Lekhtman et al. (2021 )proposed a customized pre-training approach with aspect category shift for the aspect extraction task.  \\n\\nDespite obtaining promising results, the major limitation of these aforementioned methods for cross-domain ABSA is that their models for the main ABSA task is solely trained on the sourcedomain labeled data. Thus, their models are insensitive to target-specific features. To address this issue, some studies have explored a Cross-Domain Data Augmentation framework (CDDA) to directly generate much target-domain labeled data, including MLM-based CDDA ( Yu et al. ,2021 ;Yang et al. ,2022 ) and Seq2Seq-based CDDA ( Chen et al. ,2021 ;Li et al. ,2022 ). However, the generated data by these methods has several limitations including 1) preserving many source-specific attributes such as syntactic structures; 2) lack of fluency and diversity. Thus, in this work, we aim to propose a new data augmentation framework that can generate fluent target-domain labeled data without any source-specific attributes.\\n\\n# 3 Methodology\\n\\n# 3.1 Problem Definition and Notations\\nFollowing previous studies ( Li et al. ,2019c ), we formulate ABSA and AE as a sequence labeling problem. 
Given a sentence with $n$ words $\\pmb{x}\\,=\\,\\{w_{1},w_{2},...,w_{n}\\}$ , the goal is to predict its corresponding label sequence $\\pmb{y}=\\{y_{1},y_{2},...,y_{n}\\}$ , where $y_{j}\\in\\{\\text{B-POS},\\text{I-POS},\\text{B-NEG},\\text{I-NEG},\\text{B-NEU},\\text{I-NEU},\\text{O}\\}$ for ABSA and $y_{j}\\in\\{\\text{B},\\text{I},\\text{O}\\}$ for AE.  \n\nIn this work, we focus on the unsupervised domain adaptation setting, in which the source domain has enough labeled data and the target domain only has unlabeled data. Let $\\mathcal{D}^{S}=\\{(\\pmb{x}_{i}^{s},\\pmb{y}_{i}^{s})\\}_{i=1}^{N^{s}}$ denote a set of source-domain labeled data, and $\\mathcal{D}^{T}=\\{\\pmb{x}_{i}^{t}\\}_{i=1}^{N^{t}}$ a set of target-domain unlabeled data. The goal is to leverage $\\mathcal{D}^{S}$ and $\\mathcal{D}^{T}$ to predict the label sequences of test data from the target domain.\n\n# 3.2 Overview\nAs illustrated in Figure 2 , our Cross-Domain Data Augmentation framework contains three key stages, including 1) Domain-Adaptive Pseudo Labeling, 2) Domain-Adaptive Language Modeling, and 3) Target-Domain Data Generation. In the first stage, an aspect-aware domain adaptation model is trained to assign pseudo labels to unlabeled data in the target domain. In the second stage, the labeled source data and the pseudo-labeled target data are used to train a domain-adaptive language model, which integrates data generation and sequence labeling in a unified architecture to capture the transferable context and annotation across domains. 
After training the DALM, the last stage uses probabilitybased generation strategy to generate diverse targetdomain data with fine-grained annotations in an autoregressive manner.\\n\\n# 3.3 Domain-Adaptive Pseudo Labeling\\nIn this stage, our goal is to assign the pseudo labels to each unlabeled data in the target domain. Since the data distribution of the source domain is different from that of the target domain, directly training a classifier on the labeled source data to predict the pseudo labels of the unlabeled target data will bring much noise. Thus, it is necessary to alleviate the domain discrepancy to improve the quality of pseudo-labels. Since aspect terms are shown to play a crucial role in ABSA ( Gong et al. ,2020 ), we attempt to explicitly minimize the distance between source-domain and target-domain aspect term representations via Maximum Mean Discrepancy (MMD) ( Gretton et al. ,2012 ).  \\n\\nand the unlabeled ta the aspect terms in extract the aspect terms in based algorithm named Double Propagation ( Specifically, given the labe D$\\\\mathcal{D}^{S}$ data via D$\\\\mathcal{D}^{T}$ D$\\\\mathcal{D}^{T}$ gold labels and based on a rule, we first obtain source data Qiu $\\\\mathcal{D}^{S}$ et al. ,2011 ). Let us use $\\\\pmb{x}^{d}=\\\\{\\\\bar{w_{1}^{d}},\\\\bar{w_{2}^{d}},...,w_{n}^{d}\\\\}$ }to denote a source or target domain sentence and use $\\\\pmb{a}^{d}\\\\,=\\\\,\\\\{w_{i}^{d},...,w_{j}^{d}\\\\}$ to denote e aspect terms in the sentence, where then employ a pre-trained BERT model to obtain $d\\\\in\\\\{s,t\\\\}$ ∈{ }. We the hidden representation of the sentence $\\\\mathbf{H}^{d}=$ $\\\\{\\\\mathbf{h}_{1}^{d},\\\\mathbf{h}_{2}^{d},...,\\\\mathbf{h}_{n}^{\\\\bar{d}}\\\\}$ }and the asp epresentation $\\\\mathbf{a}^{d}=g(\\\\mathbf{h}_{i}^{d},...,\\\\mathbf{h}_{j}^{d})$ , where $\\\\mathbf{h}^{d}\\\\,\\\\in\\\\,\\\\mathbb{R}^{r}$ ∈,$r$ refers to the hidden dimension, and $g(\\\\cdot)$ denotes the meanpooling operation. 
Next, we propose an aspectlevel MMD loss to alleviate the distribution discrepancy across domains as follows:  \\n\\n  \\nFigure 2: Overview of cross-domain Data Augmentation with Domain-Adaptive Language Modeling $(\\\\mathrm{DA}^{2}\\\\mathrm{LM})$ .  \\n\\n$$\\n\\\\begin{array}{r l}&{\\\\mathcal{L}_{\\\\mathrm{mmd}}=\\\\mathrm{d}_{k}^{2}\\\\big(\\\\mathcal{D}_{a}^{S},\\\\mathcal{D}_{a}^{T}\\\\big)=\\\\cfrac{1}{\\\\big(N_{a}^{s}\\\\big)^{2}}\\\\sum_{i,j}^{N_{a}^{s}}k\\\\big(\\\\mathbf{a}_{i}^{s},\\\\mathbf{a}_{j}^{s}\\\\big)+}\\\\\\\\ &{\\\\cfrac{1}{\\\\big(N_{a}^{t}\\\\big)^{2}}\\\\sum_{i,j}^{N_{a}^{t}}k\\\\big(\\\\mathbf{a}_{i}^{t},\\\\mathbf{a}_{j}^{t}\\\\big)-\\\\cfrac{2}{N_{a}^{s}N_{a}^{t}}\\\\sum_{i}^{N_{a}^{s}}\\\\sum_{j}^{N_{a}^{t}}k\\\\big(\\\\mathbf{a}_{i}^{s},\\\\mathbf{a}_{j}^{t}\\\\big),}\\\\end{array}\\n$$  \\n\\nwhere aspect term representations in the source domain $\\\\mathcal{D}_{a}^{S}$ and $\\\\mathcal{D}_{a}^{T}$ respectively denote the sets of and the target domain, $N_{a}^{s}$ and $N_{a}^{t}$ refer to the number of aspect terms in the two domains, and $k(\\\\cdot)$ denotes the Gaussian Kernel function.  \\n\\nMeanwhile, for each source sample, the hidden representation $\\\\mathbf{H}^{s}$ is fed into a Conditional Random Field (CRF) layer to predict the label sequence for the ABSA or AE task $p(\\\\pmb{y}^{s}|\\\\mathbf{H}^{s})$ . The goal is to minimize the negative log-probability of the correct label sequence of each source-domain sample:  \\n\\n$$\\n\\\\mathcal{L}_{\\\\mathrm{crf}}=-\\\\sum_{i=1}^{N^{s}}\\\\log p(\\\\boldsymbol{y}_{i}^{s}|\\\\mathbf{H}_{i}^{s}).\\n$$  \\n\\nThe CRF loss for the ABSA or AE task and the aspect-level MMD loss are combined to train the base model $C_{b}$ :  \\n\\n$$\\n\\\\begin{array}{r}{\\\\mathcal{L}=\\\\mathcal{L}_{\\\\mathrm{crf}}+\\\\alpha\\\\mathcal{L}_{\\\\mathrm{mmd}},}\\\\end{array}\\n$$  \\n\\nwhere $\\\\alpha$ is the hyper-parameter.  
\\n\\nFinally, we use $C_{b}$ to assign pseudo labels $\\\\{(\\\\pmb{x}_{i}^{p t},\\\\pmb{y}_{i}^{p t})\\\\}_{i=1}^{N^{t}}$ to each sample in .$\\\\mathcal{D}^{T}$ , and obtain $\\\\begin{array}{r l}{\\\\mathcal{D}^{P T}}&{{}=}\\\\end{array}$\\npaper_title: Aspect-Based Sentiment Analysis with Explicit Sentiment Augmentations\\npaper_metainfo: Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db\\nchunk_id: 1\\n# Related Work\\n\\n# Aspect-based Sentiment Analysis\\nAspect-Based Sentiment Analysis (ABSA) methods primarily focus on integrating sentiment information from contextual words into aspect terms. In earlier approaches, this was often achieved by utilizing LSTM or Bi-LSTM as encoders (Tang et al. 2016; Wang et al. 2016; Cheng et al. 2017). Consequently, recent advancements have embraced the Attention mechanism as the preferred encoder (Tang et al. 2016; Wang et al. 2016; Cheng et al. 2017). Notably, leveraging pre-trained language models has emerged as the prevailing trend in ABSA (Xu et al. 2020; Dai et al. 2021). Furthermore, to establish stronger connections between aspect terms and opinion words, numerous studies have delved into constructing dependency trees within sentences and refining aspect term representations using Graph Convolutional Networks (GCNs) (Sun et al. 2019; Wang et al. 2020; Li et al. 2021a).  \\n\\nConcurrently, alongside developing robust encoders, researchers have explored the enrichment of training data to provide external sentiment information for the model (He et al. 2019; Wang et al. 2022a; Yang et al. 2023). These additional data often lack fine-grained annotations and necessitate subsequent data processing. Addressing this, this paper integrates ABSA-specific augmentations into ABSA models, bypassing the need for extensive reprocessing.\\n\\n# Implicit Sentiment Analysis\\nImplicit sentiment classification, a pivotal subfield within sentiment analysis, was pioneered by Liu (2012), drawing significant scholarly interest. 
Initial works revolved around implicit sentiment at the sentence level (Deng, Wiebe, and Choi 2014; Choi, Wiebe, and Mihalcea 2017; Zhou et al. 2021a; Xu et al. 2022). Recent endeavors have shifted towards tackling implicit aspect-based sentiment classification (Li et al. 2021b; Wang et al. 2022b; Fei et al. 2023). A prevailing approach involves incorporating external knowledge to capture sentiment expression patterns. For instance, Xu et al. (2022) integrates external sentiment-related knowledge into sentence features, enhancing the model’s sentiment comprehension. Similarly, Li et al. (2021b) employs a post-training strategy with BERT, leveraging contrastive learning on expansive sentiment-annotated corpora. ABSAESA utilizes the data generated by the model instead of obtaining external knowledge.\\n\\n# Data Augmentation\\nWithin NLP, the data augmentation technique has gained substantial traction to expand the pool of available training instances. This approach finds widespread application across diverse domains, including text classification (Wu et al. 2022; Liu et al. 2022; Ouyang et al. 2022), neural machine translation (Lam, Schamoni, and Riezler 2022; Kambhatla, Born, and Sarkar 2022; Gao et al. 2019), and text generation (Bi, Li, and Huang 2021; Xu et al. 2021). Notably, recent strides in ABSA have similarly leveraged data augmentation (Chen, Faulkner, and Badyal 2022; Wang et al. 2022a; Hsu et al. 2021). However, their augmentation techniques tend to be relatively simple, e.g., token replacement, masked aspect prediction, and polarity reversal, limiting the semantic diversity of the enhanced samples. 
The augmentation method in this paper is based on the language model, which generates augmentations with rich sentiment information.\\n\\n# Our Proposed ABSA-ESA Model\\nIn this section, we introduce the proposed ABSA method named ABSA-ESA .\\n\\n# Overall Framework\\nGenerally speaking, ABSA methods take the review sentence $\\\\mathbf{s}\\\\doteq\\\\{s_{j}\\\\}_{j=1}^{M}$ and its corresponding aspect term $\\\\textbf{a}=$ $\\\\{a_{j}\\\\}_{j=1}^{|{\\\\bf a}|}$ as the model input, $M$ denotes the length of all polarity $y\\\\ \\\\in\\\\ \\\\mathcal{Y}\\\\ =$ $\\\\{\\\\mathsf{P o s i t i v e,N e g a t i v e,N e u t r a l}\\\\}$ {}for a . To deal with the sentences containing implicit sentiment, we extend this paradigm by introducing an augmented sentence ${\\\\bf{s}}^{\\\\prime}$ following the initial input s. This augmented sentence contains explicit sentiment tied to the aspect term a . For clarity, we present the comprehensive framework of ABSA-ESA in Figure 2.  \\n\\nTo generate the augmented sentence ${\\\\bf{s}}^{\\\\prime}$ , we propose an ABSA-specific explicit sentiment generation method. We post-train T5 by utilizing $\\\\hat{\\\\mathbf{s}}^{\\\\prime}$ as generation targets selected from the dataset. $\\\\hat{\\\\mathbf{s}}^{\\\\prime}$ has the same (or similar) aspect terms and sentiment as swhile also incorporating explicit sentiment expressions. Additionally, we utilize three strategies to guide the generation concerning sentiment polarity and aspect terms. During the training phase, a Syntax Distance Weighting strategy is implemented to prioritize context words closest to the aspect term in the dependency parse. Furthermore, we also gather $\\\\bar{\\\\mathbf{s}}^{\\\\prime}$ , which has the opposite sentiment of s, for Unlikelihood Contrastive Regularization. It instructs the model about undesirable word choices. 
When generating ${\\\\bf s}^{\\\\prime}$ , we employ Constrained Beam Search to ensure that the aspect term or its similar aspect is included in the augmentations and its context words are the most relevant to a .  \\n\\n  \\nFigure 2: Overall framework of ABSA-ESA.  \\n\\nNext, we introduce the details of the ABSA-specific explicit sentiment generation method.\\n\\n# Training Data Collection\\nTo train the explicit sentiment generation model, the initial step is to gather the training data. Given an input sentence sand its corresponding aspect term a , the generating target $\\\\hat{\\\\mathbf{s}}^{\\\\prime}$ must satisfy the following rules:  \\n\\n• The target sentence should incorporate the same (or similar) aspect term as the input sentence.   \\n• The target sentence should exhibit identical sentiment polarity to the input data.   \\n• The target sentence must contain explicit sentiment expressions .  \\n\\nTo obtain the target sentence $\\\\hat{\\\\mathbf{s}}^{\\\\prime}$ that satisfies the given rules, we begin by aggregating all aspect terms in dataset $\\\\mathcal{D}$ aspect term to construct the aspect term set $\\\\bar{\\\\bf a}_{i}$ is associated with a representation $\\\\bar{\\\\mathcal{A}}~=~\\\\{\\\\bar{\\\\mathbf{a}}_{i}\\\\}_{i=1}^{|\\\\mathcal{A}|}$ $r_{\\\\bar{\\\\mathbf{a}}_{i}}{}^{1}$ . Each acquired by consulting the GloVe embedding table (Pennington, Socher, and Manning 2014). 
Utilizing these represen$\\\\mathcal{R}~=~\\\\{r_{\\\\bar{\\\\mathbf{a}}_{i}}\\\\}_{i=1}^{|A|}$ , we formulate a similarity matrix $\\\\mathbf{C}\\\\,=\\\\,\\\\{c_{i j}\\\\}_{|A|\\\\times|A|}$ tween aspect terms {}|A|×|A| $\\\\bar{\\\\bf a}_{i}$ and here $\\\\bar{\\\\mathbf{a}}_{j}.\\\\,c_{i j}$ $c_{i j}$ represents the similarity beis computed by the cosine distance:  \\n\\n$$\\nc_{i j}=\\\\cos(r_{\\\\bar{\\\\mathbf{a}}_{i}},r_{\\\\bar{\\\\mathbf{a}}_{j}}).\\n$$  \\n\\nWith the similarity matrix $\\\\mathbf{C}$ available, we proceed to the selection of $\\\\hat{\\\\mathbf{s}}^{\\\\prime}$ . According to Li et al. (2021b), the dataset $\\\\mathcal{D}$ $\\\\mathcal{D}_{i}$ Dsions, we choose the can be divided int . As sentences in $\\\\mathcal{D}_{e}$ ˆ$\\\\hat{\\\\mathbf{s}}^{\\\\prime}$ contain explicit sentiment expresplicit subset from this subset to fulfill the third $\\\\mathcal{D}_{e}$ and implicit subset rule above.  \\n\\nWe firs elect $k_{c}$ aspect terms fro $\\\\mathcal{R}$ , which are most similar to a , thereby f ing the set A $\\\\mathcal{A^{\\\\prime}}$ . Subsequently, extract sentences from and share the same sentiment as D$\\\\mathcal{D}_{e}$ containing aspect terms from s. This forms the candidate A target sentence s $\\\\hat{S}_{t}$ rom this set, we randomly choose a target sentence $\\\\hat{\\\\mathbf{s}}^{\\\\prime}\\\\in\\\\hat{S}_{t}$ ∈Sto generate training data $(\\\\mathbf{s},\\\\hat{\\\\mathbf{s}}^{\\\\prime},\\\\mathbf{a})$ with the input sentence and the corresponding aspect term. This process is iterated for all input sentences $\\\\mathbf{s}\\\\in\\\\mathcal{D}$ , resultward, we begin to post-train T5 by ing the final training dataset $\\\\hat{\\\\mathcal{D}}\\\\,=\\\\,\\\\{(\\\\mathbf{s}_{i},\\\\hat{\\\\mathbf{s}}_{i}^{\\\\prime},\\\\mathbf{a}_{i})\\\\}_{i=1}^{N}$ D.}. 
After\\npaper_title: BiSyn-GAT+: Bi-Syntax Aware Graph Attention Network for Aspect-based Sentiment Analysis\\npaper_metainfo: Conf_Paper_Meta_Data_ACL_2022_Annual_Meeting_of_the_Association_for_Computational_Linguistics_with_whole_text.db\\nchunk_id: 0\\n# BiSyn-GAT $^{\\\\mp}$ : Bi-Syntax Aware Graph Attention Network for Aspect-based Sentiment Analysis\\nShuo Liang 1 , Wei Wei , Xian-Ling Mao 3 , Fei Wang 4 , Zhiyong He 5 Cognitive Computing and Intelligent Information Processing (CCIIP) Laboratory, School of Computer Science and Technology, Huazhong University of Science and Technology 3 School of Computer Science and Technology, Beijing Institute of Technology 4 Institute of Computig Technology, Chinese Academy of Sciences 5 Naval University of Engineering 1  ,2  ,3  ,4  ,5\\n\\n# Abstract\\nAspect-based sentiment analysis (ABSA) is a fine-grained sentiment analysis task that aims to align aspects and corresponding sentiments for aspect-specific sentiment polarity inference. It is challenging because a sentence may contain multiple aspects or complicated ( e.g., conditional, coordinating, or adversative) relations. Recently, exploiting dependency syntax information with graph neural networks has been the most popular trend. Despite its success, methods that heavily rely on the dependency tree pose challenges in accurately modeling the alignment of the aspects and their words indicative of sentiment, since the dependency tree may provide noisy signals of unrelated associations ( e.g., the “ conj ” relation between “ great ” and “ dreadful ” in Figure 2 ). In this paper, to alleviate this problem, we propose a Bi -Syn tax aware Graph At tention Network ( BiSyn-GAT+ ). 
Specifically, BiSyn-GAT+ fully exploits the syntax information ( e.g., phrase segmentation and hierarchical structure) of the constituent tree of a sentence to model the sentiment-aware context of every single aspect (called intra -context) and the sentiment relations across aspects (called inter -context) for learning. Experiments on four benchmark datasets demonstrate that BiSyn-GAT $^+$ outperforms the stateof-the-art methods consistently.\\n\\n# 1 Introduction\\nAspect-based sentiment analysis (ABSA) aims to identify the sentiment polarity towards a given aspect in the sentence. Many previous works ( Yang et al. ,2018 ;Li et al. ,2019 ) mainly focus on extracting sequence features via Recurrent Neural Networks (RNNs) or Convolution Neural Networks (CNNs) with attention mechanisms, which often assume that words closer to the target aspect are more likely to be related to its sentiment. However, the assumption might not be valid as exemplified in Figure 1 (a), “service” is obviously closer to “great” rather than “dreadful”, and these methods may assign the irrelevant opinion word “great” to “service” mistakenly.  \\n\\n  \\nFigure 1: Examples of ABSA task. Each underlined aspect is classified to corresponding sentiment polarity.  \\n\\n  \\nFigure 2: Dependency tree of “The food is great but the service and the environment are dreadful”. Two separate ellipses encircle its two clauses. The “conj” edge between “great” and “dreadful” is a noise.  \\n\\nTo mitigate this problem, there already exists several efforts ( Wang et al. ,2020a ;Chen et al. ,2020 )dedicated to research on how to effectively leverage non-sequential information ( e.g., syntactic information like dependency tree) via Graph Neural Networks (GNNs). Generally, a dependency tree ( i.e., Dep.Tree), linking the aspect terms to the syntactically related words, stays valid in the long-distance dependency problem. 
However, the inherent nature of Dep.Tree structure may introduce noise like the unrelated relations across clauses, such as “conj” relation between “great” and “dreadful\" in Figure 2 ,which discourages capturing the sentiment-aware context of each aspect, i.e., intra -context. Moreover, the Dep.Tree structure only reveals relations between words and, thereby, in most cases, is incapable of modeling complicated ( e.g., conditional, coordinating, or adversative) relations of sentences, therefore failing to capture sentiment relations between aspects, i.e., inter -context.  \\n\\n  \\nFigure 3: Constituent tree of the sentence “The food is great but the service and the environment are dreadful”. Context words are in rectangles and parsed phrase types are in rounded rectangles.  \\n\\nHence, in this paper, we consider fully exploiting the syntax information of the constituent tree to tackle the problem. Typically, a constituent tree (i.e., Con.Tree) often contains precise and discriminative phrase segmentation and hierarchical composition structure, which are helpful for correctly aligning the aspects and their corresponding words indicative of sentiment. The former can naturally divide a complicated sentence into multiple clauses, and the latter can discriminate different relations among aspects to infer the sentiment relations of different aspects. We illustrate this with an example in Figure 3 : (1) Clause “The food is great” and the clause “the service and environment are dreadful” are segmented by the phrase segmentation term “but”; (2) In Layer-1, the term “and” indicates the coordinating relation of “service” and “environment”, while the term “but” in Layer-3 reflects the adversative relation towards “food” and “service” (or “environment”).  
\\n\\nThus, to better align aspect terms and corresponding sentiments, we propose a new framework, Bi -Syn tax aware Graph At tention Network (BiSyn-GAT+ ), to effectively leverage the syntax information of constituent tree by modeling intra -context and inter -context information. In particular, BiSyn-GAT $^+$ employs: 1) a syntax graph embedding to encode the intra -context of each aspect based on the fusion syntax information within the same clause in a bottom-up way, which combines the phrase-level syntax information of its constituent tree and the clause-level syntax information of its dependency tree. 2) an aspect-context graph consisting of phrase segmentation terms and all aspects to model the inter -context of each aspect. Specifically, it aggregates the sentiment information of other aspects according to the influence between the current aspect and its neighbor aspects, which is calculated based on aspect representations learned from bi-directional relations over the aspect context graph, respectively.  \\n\\nOur main contributions are as follows:  \\n\\n(1) To the best of our knowledge, this is the first work to exploit syntax information of constituent tree ( e.g., phrase segmentation and hierarchical structure) with GNNs for ABSA. Moreover, it shows superiority in the alignments between aspects and corresponding words indicative of sentiment.  \\n\\n(2) We propose a framework, Bi -Syn tax aware Graph At tention Network ( BiSyn-GAT+ ), to fully leverage syntax information of constituent tree (or, and dependency tree) by modeling the sentimentaware context of each single aspect and the sentiment relations across aspects.  
\\n\\n(3) Extensive experiments on four datasets show that our proposed model achieves state-of-the-art performances.\\npaper_title: Counterfactual-Enhanced Information Bottleneck for Aspect-Based Sentiment Analysis\\npaper_metainfo: Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db\\nchunk_id: 1\\n# Introduction\\nAspect-based sentiment analysis (ABSA), which aims to identify the sentiment of a specific aspect in a sentence, has raised increasing interest in both academic and industrial communities (Zhang et al. 2022). For accurate and stable sentiment prediction in this fine-grained sentiment analysis task, it is essential to capture the context words expressing opinions towards the target aspect.  \\n\\nSo far, deep learning techniques have been predominant in the ABSA task. Deep neural networks can automatically and efficiently learn discriminative contextual representations of both the context and aspect without time-consuming human annotation (Negi and Buitelaar 2014). To model the semantic relationship between the target aspect and its context, various attention mechanisms have been proposed to learn interactive features of the context and aspect (Tang, Qin, and Liu 2016; Ma et al. 2017; Lei et al. 2019). In another line, several works explicitly capture syntax-aware features for the aspect by incorporating the syntactic knowledge with graph neural networks (Huang and Carley 2019; Wang et al. 2020; Liang et al. 2022). More recently, pre-trained language models (PLMs), such as BERT (Devlin et al. 2019), have been applied to the ABSA task, yielding state-of-theart results (Song et al. 2019; Xu et al. 2019; Jiang et al. 2019; Zhang, Zhou, and Wang 2022). Extensive linguistic knowledge learned from the large-scale textual corpus can be utilized to improve the performance for ABSA.  \\n\\n  \\nFigure 1: Spurious correlation between the context words “never had ” and the sentiment label “P OSITIVE ” in the restaurant dataset. 
We use counterfactual data featuring identical spurious context words while different sentiment labels to encourage the model to capture vital opinion words.  \\n\\nDespite the effectiveness of prior studies, few efforts are devoted to mitigating the spurious correlation problem for ABSA. Specifically, deep ABSA models appear to associate superficial patterns with predicted labels, which are held by most training samples but not intrinsic to the ABSA task. For example, as shown in Figure 1, due to the high cooccurrence of the context words “ never had ” and the sentiment label “P OSITIVE ” in the training corpus, deep models tend to learn the strong correlation between the context words “never had” and the label “P OSITIVE ”, rather than capturing the semantically crucial opinion expressions. As a result, models would fail to infer the ground-truth label “N EUTRAL ” for the testing instance, which contains the words “ never had ” without holding this spurious correlation. Under such an inductive bias, models that have achieved promising performance on the in-domain data would suffer from poor robustness against the out-of-distribution or more challenging data. One possible solution to tackle this challenge is to introduce counterfactual data with the similar spurious context words while opposite sentiment labels to motivate the counterfactual thinking (Wang et al. 2022) ability of the ABSA model. In this way, the model can pay more attention to semantically relevant opinion words for the target aspect. In addition, incorporating the original data with the augmented counterfactual data without considering their interactions would even exacerbate the model performance. Thus, it poses a non-trivial challenge to devise a strategy to effectively exploit the interactions between factual and counterfactual data for improving the robustness of the deep ABSA model.  
\\n\\nIn light of this, we propose a CounterfactualEnhanced I nformation Bottleneck framework (called CEIB) to reduce spurious correlations for ABSA, aiming to improve the robustness of the deep ABSA model. The proposed CEIB framework learns a more robust model by taking benefits of both the large language model (LLM) to generate counterfactual data from the original training data and the information bottleneck (IB) principle to model interactions between the original data and augmented data. Specifically, we first devise a multi-pattern prompting method, utilizing LLM to generate high-quality counterfactual samples from the original training samples. Then, we employ the IB principle to discard spurious features from the input while retaining essential predictive information for the sentiment label. To enhance the capacity of CEIB in characterizing adversarial and out-of-distribution data, we separate the mutual information in the original IB objective into factual and counterfactual parts by leveraging the original factual sample and the generated counterfactual sample. By balancing the predictive information of these two parts, we can learn more robust and balanced representations for the ABSA task. The main contributions in this paper can be summarized as follows:  \\n\\n• We propose a novel CEIB framework for robust aspectbased sentiment analysis, which reduces spurious correlations by taking advantage of both the IB principle and counterfactual data augmentation, with the aim of learning a more robust ABSA model.   \\n• We devise a multi-pattern prompting-based method, utilizing LLM to generate high-quality counterfactual data, which are then leveraged to balance the predictive information of the original training data to learn effective and robust representations.   \\n• We conduct extensive experiments on five widely utilized benchmark ABSA datasets. 
Experimental results show that CEIB achieves better prediction and robustness performance compared to the strong competitors.\\npaper_title: AX-MABSA: A Framework for Extremely Weakly Supervised Multi-label Aspect Based Sentiment Analysis\\npaper_metainfo: Conf_Paper_Meta_Data_EMNLP_2022_Empirical_Methods_in_Natural_Language_Processing_with_whole_text.db\\nchunk_id: 5\\n# 5 Experimental Evaluation\\nIn this section, we study the performance of the different algorithms on four datasets, compare them with different baselines, and discuss the qualitative analysis of our model performance.\\n\\n# 5.1 Evaluation Framework\\nWe evaluate our method in an End-to-End framework. The popularly used ABSA evaluation uses gold aspects as a part of input to predict the sentiment polarity of each gold aspect. However, when the task is unsupervised (almost), we do not expect to know the aspect categories beforehand, as has been explored in previous works involving sentiment mining alone. Thus, we follow the End-toEnd framework, which has two stages. In the first stage, given sentences, all the aspects are predicted. In the second stage, for each predicted aspect in the first stage, the corresponding sentiment polarity is predicted. Therefore, in our case, the first-stage output is the ACD output, which outputs aspect categories corresponding to each sentence. The second stage output is the ACSA output, which is a set of tuples consisting of (aspect category, sentiment polarity) pairs for each sentence. Therefore, if both the aspect category and sentiment polarity are predicted correctly then only, we consider it as a correct prediction. Thus, the performance is measured over all tuples (aspect, sentiment) in the gold data.\\n\\n# 5.2 Evaluation Metrics\\nWe consider two metrics for performance evaluation. For the ACD task, we report macro-averaged F1 score (or F1-macro) which is the average of F1-scores per class. 
For the ACSA task, we report macro-averaged F1-PN score (or macro F1-PN)  \\n\\n<html><body><table><tr><td rowspan=\"2\"></td><td rowspan=\"2\">SupervisionType</td><td rowspan=\"2\">Methods</td><td colspan=\"4\">ACD</td><td colspan=\"4\">ACSA</td></tr><tr><td>Rest-14</td><td>Rest-15</td><td>Rest-16</td><td>MAMS</td><td>Rest-14</td><td>Rest-15</td><td>Rest-16</td><td>MAMS</td></tr><tr><td rowspan=\"4\">Baselines</td><td>Random</td><td></td><td>22.50</td><td>21.12</td><td>19.03</td><td>16.45</td><td>08.40</td><td>08.46</td><td>07.16</td><td>05.39</td></tr><tr><td>Supervised</td><td>ACSA-Generation</td><td>91.41</td><td>83.56</td><td>87.11</td><td>89.23</td><td>78.43</td><td>71.91</td><td>73.76</td><td>70.30</td></tr><tr><td>WeaklySupervised Extremely</td><td>JASen</td><td>42.27</td><td>33.29</td><td>43.43</td><td>21.57</td><td>26.62</td><td>19.44</td><td>23.23</td><td>14.74</td></tr><tr><td>WeaklySupervised</td><td>X-Class</td><td>46.69</td><td>40.35</td><td>36.58</td><td>36.52</td><td>34.44</td><td>25.49</td><td>24.83</td><td>16.32</td></tr><tr><td rowspan=\"4\">Proposed</td><td></td><td>X-SABSA</td><td>56.16</td><td>58.87</td><td>42.77</td><td>37.72</td><td>39.66</td><td>42.55</td><td>31.46</td><td>19.60</td></tr><tr><td>Extremely</td><td>AX-SABSA</td><td>69.57</td><td>56.17</td><td>45.69</td><td>39.33</td><td>44.14</td><td>40.24</td><td>32.23</td><td>18.55</td></tr><tr><td>WeaklySupervised</td><td>X-MABSA</td><td>61.73</td><td>62.07</td><td>49.02</td><td>56.48</td><td>44.96</td><td>44.35</td><td>35.81</td><td>27.28</td></tr><tr><td></td><td>AX-MABSA</td><td>74.90</td><td>60.08</td><td>50.63</td><td>60.82</td><td>49.68</td><td>42.74</td><td>36.47</td><td>29.74</td></tr></table></body></html>  \\n\\nTable 2: Comparative Results for the ACD and End-to-End ACSA tasks. We report F1-macro score for ACD and F1-PN macro score for ACSA. X-SABSA: Proposed single label predictor model. 
AX-SABSA: Proposed single label predictor model where the candidate word for each class is also updated. X-MABSA: Proposed multi-label predictor model. AX-MABSA: Proposed multi-label predictor model where the candidate word for each class is also updated. Clustering algorithm used: mini batch k-means for ACD, and gmm for ACSA.  \\n\\nwhich is the mean of F1-scores of all aspect category, sentiment (positive, negative) pair tuples. The macro F1-PN is commonly used in different SemEval tasks ( Pontiki et al. ,2016 ).\\n\\n# 5.3 Empirical Results\\nComparative results of the ACD and ACSA tasks on different datasets are presented in Table 2 . The results show that we achieve far better performance than random baselines given that our approach is unsupervised. The improvement of our multi-label models (X-MABSA and AX-MABSA) is statistically significant at $\\\\mathsf{p}<0.01$ using paired t-test ( Hsu and Lachenbruch ,2014 ) compared to proposed single label models (X-SABSA and AX-SABSA) and weakly supervised baselines (X-Class, and JASen).  \\n\\nFor the ACD task, we achieve baseline results for all the datasets (ACSA module). We obtain F1- macro of 46.69, 40.35, 36.58, and 36.52 on Rest14, Rest-15, Rest-16, and MAMS dataset, respectively. The proposed X-SABSA model improves the performance significantly on all the datasets (F1-macro of 56.16, 58.87, 42.77, and 37.72 on Rest-14, Rest-15, Rest-16, and MAMS data, respectively). Within our proposed models, we find that our multi-label model X-MABSA performs better than single-label model X-SABSA on all datasets. Especially, on the MAMS dataset, it improves the performance significantly (F1-macro of 56.48). We also observe that the AX-MABSA model (i.e., when automatically selected candidate words are considered for class representation) further improves performance on Rest-14, Rest-16, and MAMS datasets (F1-macro of 74.90, 50.63, and 60.82). 
It shows that the AX-MABSA model is more generalized and works very well when class names are not present in the input data.  \\n\\nAs the ACSA task is framed as an End-to-End pipeline, we expect the performance to be lower than the often-used ACSA evaluation procedure. We achieve the baseline results (ACSA module) which are F1-PN-macro scores of 34.44, 25.49, 24.83, and 16.32 on Rest-14, Rest-15, Rest-16, and MAMS, respectively. We find that the proposed X-SABSA model improves the performance significantly over the baseline (F1-PN-macro of 39.66, 42.55, and 31.46 on Rest-14, Rest-15, and Rest-16, respectively). The multi-label model, X-MABSA improves the results further (F1-PNmacro of 44.96, 44.35, 35.81, and 27.28, respectively). We also observe that the AX-MABSA model improves the performance on Rest-14, Rest16, and MAMS data (F1-PN-macro of 49.68, 36.47, and 29.74, respectively).  \\n\\nWe observe that our proposed model performs significantly better than the random, and two weakly supervised baselines (X-Class and JASen) on both ACD and ACSA tasks. As our method is an extremely weakly supervised method, we do not expect our model to be better than the supervised model. However, in comparison to the supervised model (ACSA-generation), our method shows promising performance. For example, on the Rest-14 data, the supervised model achieves an F1-macro of 91.42 while our proposed model achieves an F1-macro of 74.90 for the ACD task. For the ACSA task, the proposed method performs decently compared to the supervised baseline. For example, on Rest-15, the supervised method achieves an F1-PN-macro of 71.91 while our method achieves an F1-PN-macro of 44.35.  \\n\\nIt is evident that our proposed method works comparatively poorly for ACSA task on the MAMS data. The reason for this is the presence of a remarkably high number of ‘neutral’ classes $(43.62\\\\%$ of total polarity labels). 
Selecting a single representative surface word for ‘neutral’ class is difficult as there is no association between any word and neutral sentences as compared to the ‘positive’ and ‘negative’ class. For example, the word ‘bad’ can be a representative of ‘negative’ class and the word ‘good’ can be the same of ‘positive’ class, but we found no such representative word for neutral class to perform well.  \\n\\nTable 3: Illustration of the proposed method using few examples   \\n\\n\\n<html><body><table><tr><td>Review</td><td>Actual</td><td>Predicted</td></tr><tr><td>The sashimi is always fresh and the rolls are innovative and delicious. While there\\'s a decent menu,it shouldn\\'t take ten minutes toget your</td><td>(food,positive) (food, positive),</td><td>(food, positive) (food, positive),</td></tr><tr><td>drinks and 45 for a dessert pizza. Who can\\'t decide on a single dish, the tapas menu allowed me to express</td><td>(service,negative)</td><td>(service,positive)</td></tr><tr><td>my true culinary self. Roof: very nice space (although I know 5 other rooftop bars just as</td><td>(food,negative), (menu,positive) (place, positive),</td><td>(menu,negative)</td></tr><tr><td>good),but the crowd was bunch of posers and the owner was a tool. 
Endless fun, awesome music, great staff!</td><td>(miscellaneous,neutral) (service, positive),</td><td>(place, positive), (ambience,negative) (service, positive),</td></tr><tr><td></td><td>(ambience,positive), (restaurant, positive)</td><td>(ambience,positive)</td></tr></table></body></html>\\npaper_title: Cross-Domain Data Augmentation with Domain-Adaptive Language Modeling for Aspect-Based Sentiment Analysis.\\npaper_metainfo: Conf_Paper_Meta_Data_ACL_2023_with_whole_text.db\\nchunk_id: 0\\n# Cross-Domain Data Augmentation with Domain-Adaptive Language Modeling for Aspect-Based Sentiment Analysis\\nJianfei $\\\\mathbf{Y_{u}}^{*}$ , Qiankun Zhao ∗and Rui Xia †School of Computer Science and Engineering, Nanjing University of Science and Technology, China {jfyu, kkzhao, rxia}@njust.edu.cn\\n\\n# Abstract\\nCross-domain Aspect-Based Sentiment Analysis (ABSA) aims to leverage the useful knowledge from a source domain to identify aspectsentiment pairs in sentences from a target domain. To tackle the task, several recent works explore a new unsupervised domain adaptation framework, i.e., Cross-Domain Data Augmentation (CDDA), aiming to directly generate much labeled target-domain data based on the labeled source-domain data. However, these CDDA methods still suffer from several issues: 1) preserving many source-specific attributes such as syntactic structures; 2) lack of fluency and coherence; 3) limiting the diversity of generated data. To address these issues, we propose a new cross-domain Data Augmentation approach based on Domain-Adaptive Language Modeling named $\\\\mathrm{DA^{2}L M}$ , which contains three stages: 1) assigning pseudo labels to unlabeled target-domain data; 2) unifying the process of token generation and labeling with a Domain-Adaptive Language Model (DALM) to learn the shared context and annotation across domains; 3) using the trained DALM to generate labeled target-domain data. 
Experiments show that $\\\\mathrm{DA^{2}L M}$ consistently outperforms previous feature adaptation and CDDA methods on both ABSA and Aspect Extraction tasks. The source code is publicly released at https://github.com/NUSTM/DALM .\\n\\n# 1 Introduction\\nAs an important task in sentiment analysis, AspectBased Sentiment Analysis (ABSA) aims to extract aspect terms from sentences and predict the sentiment polarity towards each aspect term ( Liu ,2012 ;Pontiki et al. ,2016 ). For example, given a sentence “The screen is broken \", the aspect term is screen and its sentiment polarity is Negative . With the advancements of deep learning techniques, a myriad of neural approaches have been proposed for ABSA and achieved promising results on several benchmark datasets ( Li et al. ,2019a ;He et al. ,2019 ;Chen and Qian ,2020b ). However, these methods heavily rely on labeled data with fine-grained annotation, which is often time-consuming and expensive to obtain for many emerging domains.  \\n\\n  \\nFigure 1: Comparison between different Cross-Domain Data Augmentation (CDDA) methods.  \\n\\nTo alleviate the reliance on labeled data, many previous works resorted to unsupervised domain adaptation techniques, which aim to transfer knowledge from a resource-rich source domain to a target domain only with unlabeled data ( Blitzer et al. ,2007 ;Pan et al. ,2010 ;Zhuang et al. ,2015 ). Most existing domain adaptation methods on the ABSA task focus on learning shared feature representations across domains ( Wang and Pan ,2018 ;Li et al. ,$2019c$ ;Gong et al. ,2020 ;Chen and Qian ,2021 ). Although these methods have obtained promising results, their models are only trained on the sourcedomain labeled data and thus insensitive to the important target-specific aspect and opinion terms.  
\\n\\nTo address this limitation, several recent studies have explored a new domain adaptation framework named Cross-Domain Data Augmentation (CDDA), which aims to directly generate much target-domain labeled data based on the labeled data from the source domain. These existing methods can be summarized into two groups: Masked Language Model (MLM)-based CDDA ( Yu et al. ,2021 ;Yang et al. ,2022 ) and Sequence-to-Sequence (Seq2Seq)-based CDDA ( Chen et al. ,2021 ;Li et al. ,2022 ). As shown in Fig. 1 (a) and Fig. 1 (b), the core idea behind existing CDDA methods is to first mask source-specific words in the sourcedomain labeled data, followed by using either the well-trained MLM or Seq2Seq models to automatically generate target-specific words and labels in the masked positions. Despite achieving significant improvements over previous feature adaptation methods, these CDDA approaches still have several shortcomings: 1) they only mask source-specific words or phrases but preserve other source-specific attributes such as syntactic structures, which make the distribution of the generated data different from that of the real target-domain data; 2) replacing source-specific words with target-specific words may destruct the semantic meaning of the original sentence, making the generated data lack of fluency and coherence; 3) existing CDDA methods regard each source-domain sentence as the template, thus limiting the diversity of the generated data.  \\n\\nTo tackle these shortcomings, we propose a new cross-domain Data Augmentation approach based on Domain-Adaptive Language Modeling named $\\\\mathrm{DA^{2}L M}$ , which consists of three stages, including Domain-Adaptive Pseudo Labeling, DomainAdaptive Language Modeling, and Target-Domain Data Generation. Specifically, the labeled source data and unlabeled target data are first leveraged to train a base domain adaptation model, which is then used for predicting pseudo labels of unlabeled data in the target domain. 
Secondly, we design a novel Domain-Adaptive Language Model (DALM), and train it on the labeled source data and pseudo-labeled target data to learn the transferable context and label across domains. Different from most existing LMs, our DALM unifies the process of data generation and fine-grained annotation, aiming to simultaneously generate the next token and predict the label of the current token at each time step of the training stage. Finally, given the trained DALM, we employ it to generate many labeled target-domain data in an autoregressive manner with a probability-based generation strategy.  \\n\\nOur main contributions can be summarized as follows:  \\n\\n•We propose a three-stage framework named cross-domain Data Augmentation with Domain Adaptive Language Modeling $\\\\mathrm{(DA^{2}L M)}$ , which can generate a large amount of labeled targetdomain data for the cross-domain ABSA task.  \\n\\n•Under the framework, we devise a new domainadaptive language model, which unifies the process of data generation and labeling and captures the domain-invariant context and annotation for target-domain data generation.  \\n\\n•Experiments on four benchmark datasets demonstrate that our framework significantly outperforms a number of competitive domain adaptation methods on both ABSA and Aspect Extraction (AE) tasks. Further analysis on generated data shows the superiority of our framework in terms of data distribution, diversity, and fluency.\\npaper_title: Counterfactual-Enhanced Information Bottleneck for Aspect-Based Sentiment Analysis\\npaper_metainfo: Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db\\nchunk_id: 5\\n# Experimental Setup\\n\\n# Datasets\\nWe conduct our experiments on five benchmark ABSA datasets: REST 14 and LAP 14 from (Pontiki et al. 2014), REST 15 from (Pontiki et al. 2015), REST 16 from (Pontiki et al. 2016), and MAMS from (Jiang et al. 2019). We adopt the official data splits, which keep the same as in the original papers. 
We also use A RTS dataset (Xing et al. 2020), including R EST 14- ARTS and L AP 14- ARTS , to test the robustness of the ABSA models. Each instance in these datasets consists of a review sentence, a target aspect, and the sentiment polarity (i.e., P OSITIVE , N EGATIVE , N EUTRAL ) towards the target aspect. The detailed statistics of the utilized datasets are shown in Table 1.\\n\\n# Baselines and Evaluation Metrics\\nWe compare our CEIB approach with several state-of-theart ABSA methods based on BERT, including BERT-SPC (Song et al. 2019) that takes sentence-aspect pair as the input sequence of BERT to learn aspect-aware representation; BERT-PT (Xu et al. 2019) that post-trains BERT with external domain-specific corpus to improve the model performance; CapsNet-BERT (Jiang et al. 2019) that adopts the capsule network to capture semantic interactions of the aspect and its context; DGEDT-BERT (Tang et al. 2020) that proposes a dual-transformer network which jointly learns the flat and graph-based representations; RGATBERT (Wang et al. 2020) that adopts a relational graph attention network to encode the aspect-oriented syntactic dependency; DualGCN-BERT (Li et al. 2021) that designs a SynGCN module with rich syntactic knowledge and a SemGCN module to capture semantic correlations; TGCNBERT (Tian, Chen, and Song 2021) that introduces syntactic dependency types into GCN and adopts attentive layer ensemble to fully exploit the type information; SenticGCNBERT (Liang et al. 2022) that integrates affective knowledge into the dependency graph to learn the sentiment associations between the context and aspect; SSEGCN-BERT (Zhang, Zhou, and Wang 2022) that leverages both the syntactic and semantic information by aspect-aware attention mechanism and syntactic mask matrices.  
\\n\\nThe Thirty-Eighth AAAI Conference on Artificial Intelligence (AAAI24)   \\n\\n\\n<html><body><table><tr><td rowspan=\"2\">Models</td><td colspan=\"2\">REST14 (%)</td><td colspan=\"2\">LAP14 (%)</td><td colspan=\"2\">REST15 (%)</td><td colspan=\"2\">REST16 (%)</td><td colspan=\"2\">MAMS (%)</td></tr><tr><td>Acc.</td><td>F1</td><td>Acc.</td><td>F1</td><td>Acc.</td><td>F1</td><td>Acc.</td><td>F1</td><td>Acc.</td><td>F1</td></tr><tr><td>BERT-SPC (Song et al. 2019)</td><td>84.11</td><td>76.68</td><td>77.59</td><td>73.28</td><td>83.48</td><td>66.18</td><td>90.10</td><td>74.16</td><td>83.98</td><td>83.41</td></tr><tr><td>BERT-PT (Xu et al. 2019)#</td><td>84.95</td><td>76.96</td><td>78.07</td><td>75.08</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>CapsNet-BERT (Jiang et al. 2019)b</td><td>85.36</td><td>78.41</td><td>78.97</td><td>75.66</td><td>82.10</td><td>65.57</td><td>90.10</td><td>75.15</td><td>83.76</td><td>83.15</td></tr><tr><td>DGEDT-BERT (Tang et al. 2020)#</td><td>86.30</td><td>80.00</td><td>79.80</td><td>75.60</td><td>84.00</td><td>71.00</td><td>91.90</td><td>79.00</td><td></td><td></td></tr><tr><td>RGAT-BERT (Wang et al. 2020)b</td><td>86.60</td><td>81.35</td><td>78.21</td><td>74.07</td><td>83.22</td><td>69.73</td><td>89.71</td><td>76.62</td><td>82.71</td><td>82.21</td></tr><tr><td>DualGCN-BERT (Li et al. 2021)#</td><td>87.13</td><td>81.16</td><td>81.80</td><td>78.10</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>TGCN-BERT (Tian et al. 2021)#</td><td>86.16</td><td>79.95</td><td>80.88</td><td>77.03</td><td>85.26</td><td>71.69</td><td>92.32</td><td>77.29</td><td>83.38</td><td>82.77</td></tr><tr><td>SenticGCN-BERT (Liang et al. 2022)#</td><td>86.92</td><td>81.03</td><td>82.12</td><td>79.05</td><td>85.32</td><td>71.28</td><td>91.97</td><td>79.56</td><td></td><td></td></tr><tr><td>SSEGCN-BERT (Zhang et al. 
2022)#</td><td>87.31</td><td>81.09</td><td>81.01</td><td>77.96</td><td>=</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>CEIB (Ours)</td><td>87.77</td><td>82.08</td><td>82.92</td><td>79.50</td><td>86.16</td><td>72.97</td><td>92.86</td><td>81.08</td><td>84.95</td><td>84.41</td></tr><tr><td>w/o IB</td><td>85.54</td><td>78.46</td><td>77.43</td><td>75.96</td><td>84.76</td><td>69.44</td><td>91.40</td><td>76.96</td><td>83.28</td><td>83.81</td></tr><tr><td>w/o CDA</td><td>86.25</td><td>80.00</td><td>80.94</td><td>76.62</td><td>85.54</td><td>71.80</td><td>92.00</td><td>77.74</td><td>84.06</td><td>83.63</td></tr></table></body></html>\\n\\nTable 2: Main experimental results on five ABSA benchmark datasets. The results with ♯are retrieved from the corresponding original papers and ♭indicates our reproduced results. Best scores are in bold and the second best ones are underlined. All models are based on $\\\\mathbf{BERT}_{b a s e}$ .  \\n\\nTo evaluate the performance of the ABSA models, we adopt two widely-used metrics: Accuracy ( Acc. ) and macroaveraged F1 score ( F1 ). We report the averaged scores of 5 runs with random initialization to ensure the reliability and stability our experiments.\\n\\n# Implementation Details\\nThe proposed CEIB framework encompasses two parts: counterfactual data augmentation and information bottleneck. For the counterfactual data augmentation, we design 4 prompting patterns to improve the diversity of generated texts and alleviate the sensitivity to prompt templates simultaneously. The masking ratio dynamically ranges from 0 .3 to 0 .8 based on the length of the input sequence. That is, we set a smaller mask ratio for longer sentences. Employing these settings, we generate 10 counterfactual samples for each original training sample.  
\\n\\nFor the training stage based on the information bottleneck, we adopt the PyTorch implemenetation of uncased $\\\\mathrm{BERT}_{b a s e}{}^{3}$ (12 layers and 768 hidden dimensions) as the base text encoder. We train all our models for 30 epochs. Adam is used as the optimizer with the initial learning rate as $5e^{-5}$ and the weight decay as $1e^{-4}$ . The hyper-parameter $\\\\alpha$ is set in range 0 .5 to 1 .0 and $\\\\beta$ for $l_{2}$ -norm regularization is adaptively set by the optimizer.\\n\\n# Experimental Results and Analysis\\n\\n# Main Results\\nThe experimental results of the ABSA methods on five benchmark datasets (i.e. R EST 14, L AP 14, R EST 15, REST 16, MAMS) are reported in Table 2. We can observe that CEIB substantially and consistently outperforms all compared baselines on the overall datasets in terms of both accuracy (Acc.) and macro-averaged F1 (F1) score, which verifies the effectiveness of our proposed approach. In particular, CEIB achieves the best improvement of $1.52\\\\%$ macro-F1 score compared with the best baseline (i.e. SenticGCN-BERT) on the R EST 16 dataset.  \\n\\nCompared with the competitive baselines SenticGCNBERT and SSEGCN-BERT that take advantages of both the rich semantic knowledge from BERT and syntactic information from syntax dependency structures, our CEIB achieves better performance. 
The advancement is benefited from both the counterfactual data augmentation and the information bottleneck which can reduce spurious correlations while capturing the crucial contexts from the training data, thus improving the performance on the testing data.\\npaper_title: A Span-level Bidirectional Network for Aspect Sentiment Triplet Extraction\\npaper_metainfo: Conf_Paper_Meta_Data_EMNLP_2022_Empirical_Methods_in_Natural_Language_Processing_with_whole_text.db\\nchunk_id: 1\\n# 2 Related Work\\nAspect based sentiment analysis (ABSA) is a fine-grained sentiment analysis task that consists of various subtasks, including aspect term extraction (ATE) [Ma et al. , 2019], opinion term extraction (OTE) [Wu et al. , 2020], aspect-level sentiment classification (ASC) [Li et al. , 2019b]. Since these subtasks are solved individually, recent studies attempted to couple two subtasks as a compound task, such as aspect term polarity co-extraction (APCE) [Li et al. , 2019a], aspect and opinion co-extraction [Yu et al. , 2019], aspect category and sentiment classification [Hu et al. , 2018], and aspect-opinion pair extraction (AOPE) [Gao et al. , 2021]. Although many works have achieved great progress on these tasks, none of these tasks aims to identify the aspect terms as well as their corresponding opinion term and sentiment polarity.  \\n\\nTo tackle this issue, [Peng et al. , 2020] proposed the aspect sentiment triplet extraction (ASTE) task, which aimed to extract aspect terms, the sentiments of the aspect terms, and the opinion terms causing the sentiments. Some methods [Xu et al. , 2020; Wu et al. , 2020] designed a unified tagging scheme to solve this task. Some others [Chen et al. , 2021; ?] formulated this task as a multi-turn machine reading comprehension task and solve it with machine reading comprehension frameworks. Recently, [Xu et al. 
, 2021] had proposed a span-level model to extract ATs and OTs first and then predict the sentiment relation for each (AT, OT) pair, which suffers from the similar distribution of the representation of the share-token spans and the complexity from exhaustive pairing of every aspect and opinion span candidates.\n\n# 3 Methodology\nAs shown in Figure 2, our SBC framework consists of five parts: task definition, span generation, similar span separation loss, bidirectional cross-attention structure, and inference. The details of all parts are given in the following subsections.\n\n# 3.1 Task Definition\nGiven a sentence $S=\\{w_{1},w_{2},\\ldots,w_{n}\\}$ consisting of $n$ words, the goal of the ASTE task is to extract a set of aspect sentiment triplets $\\mathcal{T}=\\{(a,o,c)_{k}\\}_{k=1}^{|\\mathcal{T}|}$ from the given sentence $S$, where $(a,o,c)$ refers to (aspect term, opinion term, sentiment polarity) and $c\\in\\{Positive,Neutral,Negative\\}$ .\n\n# 3.2 Span Generation\nGiven a sentence $S$ with $n$ tokens, there are $m$ possible spans in total. Each span $\\mathbf{s}_{i}=\\bigl\\{w_{start(i)},\\ldots,w_{end(i)}\\bigr\\}$ is defined by all the tokens from $start(i)$ to $end(i)$ inclusive, and the maximum length of span $\\mathbf{s}_{i}$ is $l_{s}$ :  \n\n$$\n1\\leq start(i)\\leq end(i)\\leq n\n$$  \n\n$$\nend(i)-start(i)\\leq l_{s}\n$$  \n\nTo obtain span representations, we need to get the token-level representations first. In this paper, we utilize BERT [Devlin et al. , 2018] as a sentence encoder to obtain token-level contextualized representations $\\{\\mathbf{h}_{1},\\mathbf{h}_{2},\\dotsc,\\mathbf{h}_{n}\\}$ of the given sentence $S$. Then, the token-level representations are combined by max pooling. 
Note that various methods can be applied to generate the representations for spans, the effectiveness of these span generation methods will be investigated in the ablation study in Appendix. We define the representation of span $\\\\mathbf{s}_{i}$ as:  \\n\\n$\\\\mathbf{g}_{i}=M a x\\\\left(\\\\mathbf{h}_{s t a r t}(i),\\\\mathbf{h}_{s t a r t+1}(i),\\\\ldots,\\\\mathbf{h}_{e n d}(i)\\\\right)$ where Max represents max pooling.  \\n\\n  \\nFigure 2: The overall architecture of the span-level bidirectional cross-attention (SBC) framework. The ‘Select Span Representation’ means that only the original span representations of aspect candidates and opinion candidates are passed to aspect attention module and opinion attention module, respectively. The blue arrows and modules as well as red arrows and modules indicate the extraction of aspect-to-opinion direction and the opinion-to-aspect direction, respectively.\\n\\n# 3.3 Similar Span Separation Loss\\nAfter generating the representation of span, most previous models directly use the span representations for downstream tasks. However, enumerating all possible spans in a sentence inevitably generates lots of spans that have same tokens with some others, and the model may suffer from the limitations in processing these similar spans due to their adjacent distribution. To separate these spans with similar distributions, we propose a similar span separation loss function based on KL divergence for separating spans with shared tokens, as shown in Figure 2. 
The similar span separation loss is defined as:  \\n\\n$$\\nK L(\\\\mathbf{g}_{i}||G_{i})=\\\\sum_{j}^{G_{i}}s o f t m a x(\\\\mathbf{g}_{i})l o g\\\\frac{s o f t m a x(\\\\mathbf{g}_{i})}{s o f t m a x(\\\\mathbf{g}_{j})}\\n$$  \\n\\n$$\\nK L(G_{i}||\\\\mathbf{g}_{i})=\\\\sum_{j}^{G_{i}}s o f t m a x(\\\\mathbf{g}_{j})l o g\\\\frac{s o f t m a x(\\\\mathbf{g}_{j})}{s o f t m a x(\\\\mathbf{g}_{i})}\\n$$  \\n\\n$$\\n\\\\mathcal{J}_{K L}=\\\\sum_{i}^{m}l o g(1+\\\\frac{2}{K L(G_{i}||\\\\mathbf{g}_{i})+K L(\\\\mathbf{g}_{i}||G_{i})})\\n$$  \\n\\nwhere $G_{i}$ indicates the set of the representation of spans which share at least one token with $\\\\mathbf{s}_{i}$ .\\n\\n# 3.4 Bidirectional Cross-attention Structure\\nAs the aspect sentiment triplet can be triggered by an AT or an OT, we further design a bidirectional cross-attention structure to decode the span representations. As shown in Figure 2, the bidirectional cross-attention structure consists of an aspect decoder and an opinion decoder. The details of each component of bidirectional cross-attention structure are given in the following subsections.\\npaper_title: Knowledge Graph Augmented Network Towards Multiview Representation Learning for Aspect-based Sentiment Analysis\\npaper_metainfo: Journal_Paper_Meta_Data_IEEE_Transactions_on_Knowledge_and_Data_Engineering_with_whole_text.db\\nchunk_id: 1\\n# 1 I NTRODUCTION\\nA Sa fine-grained task of sentiment analysis, aspect-based sentiment analysis (ABSA) has grown to be an active research task in the community of natural language understanding (NLU) [1], [2], [3]. In particular, ABSA refers to judging the sentiment polarities ( e.g. , positive, neutral, and negative) towards the given aspects, which are usually the target entities appearing in the sentence [4]. Taking the sentence “The food was good, but the service was poor.” as an example, as shown in Fig. 
1(a), the goal of ABSA is to predict the polarities “positive” and “negative” for the aspects food and service , respectively.  \\n\\nRecent ABSA modeling approaches are mainly based on deep neural networks (DNNs) owing to the capability of automatically extracting semantic features [5]. Specifically, based on the type of learned feature representations, existing DNNs for ABSA can be classified into two groups: context-based methods [6], [7], [8] and syntax-based methods [9], [10], [11]. Context-based methods first employ convolutional neural networks (CNNs) or long short-term memory networks (LSTMs) to extract the features of aspects and context words and then use the attention mechanism to capture the aspect-specific contextual representations. In addition to contextbased methods, syntax-based methods attempt to model the nonlocal dependency trees (a case in point is shown in Fig. 1(b)) of sentences with graph neural networks, e.g. , graph convolutional networks (GCNs) to encode the syntactic information and syntactically connect the aspects with related opinion words [12].  \\n\\n  \\nFig. 1. (a) An example sentence of the ABSA task from the restaurant reviews. There are two aspects with opposite sentiment polarities in this sentence. (b) Illustration of the dependency parsing result.  \\n\\nMore recently, given effective knowledge, e.g. , linguistic and commonsense, for representation approaches in NLU tasks [13], [14], [15], researchers employ external knowledge to augment the semantic features in ABSA models [16], [17], [18], [19]. However, they make extensive modifications to model structures or objectives to encode the different kinds of knowledge, limiting the applicability of their methods to a broader range of tasks and knowledge types. For example, Zhou et al. [17] directly utilized the words ( w.r.t. aspect terms in sentences ) in knowledge graphs as the seed nodes and selected the related nodes to construct the subgraphs. 
While these subgraph-based methods [17], [20] have achieved remarkable performance, there are still some problems, e.g. , the process of constructing subgraphs is usually relatively complex and would bring more computation, especially when there are many aspect terms. Hence, we attempt to integrate external knowledge from a different perspective.  \\n\\nIn this paper, we propose a novel knowledge graph augmented network, namely, KGAN, to integrate external knowledge for boosting the performance of ABSA task. In general, KGAN employs three parallel branches to learn the feature representations from multiple perspectives ( i.e. , context-, syntax- and knowledgebased). The contextual and syntactic branches are used to extract the explicit context and syntax information from the labeled ABSA data, respectively, as most existing ABSA models do. More specifically, in the knowledge branch, unlike the above previous methods that usually employ complicated approaches to encode the knowledge, we recast them with a simpler and more efficient strategy to incorporate the external knowledge. In practice, instead of directly operating on graph-structure data, we first integrate external knowledge graphs into low-dimensional continuous embeddings, which can be simply and efficiently used to represent sentences and aspects. Then, based on the knowledge embeddings, a soft attention mechanism is utilized to capture the aspect-specific knowledge representations. As a result, we can obtain multiple representations that establish the relations between aspects and opinion words from different views. To take full advantage of the complementarity of these multiview representations, we introduce a novel hierarchical fusion module to effectively fuse them.  \\n\\nWe conduct a comprehensive evaluation of KGAN on SemEval2014 ( i.e. , Laptop14 and Restaurant14), SemEval2015 ( i.e. ,Restaurant15), SemEval2016 ( i.e. , Restaurant16) and Twitter benchmarks. 
The experimental results show that KGAN achieves comparable performance compared to the prior SOTA model with the GloVe-based setting. Moreover, we also investigate and demonstrate the effectiveness and robustness of our KGAN in BERT- and RoBERTa-based settings. In particular, based on RoBERTa, our model achieves the SOTA performance among all datasets in terms of accuracy and macro-F1 score. More specifically, compared to the prior SOTA models, the accuracy improvements of KGAN on Twitter, Restaurant15 and Restaurant15 datasets are up to $2.49\\\\%$ ,$3.28\\\\%$ and $2.06\\\\%$ , respectively. Finally, we also compare KGAN with the other models in terms of latency and model size and prove that KGAN can achieve a good trade-off between efficiency and performance.  \\n\\nThe main contributions can be summarized as follows:  \\n\\n1) We propose a novel knowledge graph augmented network (KGAN), where different types of information are encoded as multiview representations to augment the semantic features, thus boosting the performance of ABSA.   \\n2) To achieve better complementarity between multiview features, we design a novel hierarchical fusion module to effectively fuse them.   \\n3) Experiments on several commonly used ABSA benchmarks show the effectiveness and universality of our proposed KGAN. In combination with pretrained models, i.e. ,RoBERTa, we achieve new state-of-the-art performance on these benchmarks.  \\n\\nThe rest of this paper is organized as follows. In Sec. 2, we briefly review the related works. In Sec. 3, we introduce our proposed method in detail. Sec. 4 reports and discusses our experimental results. Lastly, we conclude our study in Sec. 
5.\\n\\n---\\n\\n<instruction>\\nNow you need to write the content for the subsection.txt:\\n\"2.1 Early Methods Based on Handcrafted Features\" under the section: \"1 Introduction to Aspect-Based Sentiment Analysis\"\\nThe details of what to write in this subsection.txt called 2.1 Early Methods Based on Handcrafted Features is in this descripition:\\n---\\nDiscuss the initial approaches to ABSA that relied on handcrafted features and traditional machine learning algorithms.\\n---\\n\\nHere is the requirement you must follow:\\n1. The content you write must be more than 500 words.\\n2. When writing sentences that are based on specific papers above, you cite the \"paper_info\" and \"chunk_id\" in a \\'[]\\' format to support your content, such as \\'[paper_info][chunk_id]\\'. \\n    An example of citation: \\'For instance, in the work [Learning Multiple Dense Prediction Tasks from Partially Annotated Data, CVPR, 2022][2], the authors propose a method to learn multiple dense prediction tasks from partially annotated data by sharing the parameters across different mappings. \\'.\\n    Note that the \"paper_info\" only include the \"paper_title\", \"name of conference or journal\" and \"published year\" of the paper.\\n    Note that the \"paper_info\" and \"chunk_id\" is not allowed to appear without a \\'[]\\' format. Once you mention the \\'paper_info\\'and \"chunk_id\", it must be included in \\'[]\\'. Papers not existing above are not allowed to cite!!!\\n    Remember that you can only cite the paper provided above !!!\\n3. Only when the main part of the paper support your claims, you cite it.\\n\\n\\nHere\\'s a concise guideline for when to cite papers in a survey:\\n---\\n1. Summarizing Research: Cite sources when summarizing the existing literature.\\n2. Using Specific Concepts or Data: Provide citations when discussing specific theories, models, or data.\\n3. Comparing Findings: Cite relevant studies when comparing or contrasting different findings.\\n4. 
Highlighting Research Gaps: Cite previous research when pointing out gaps your survey addresses.\\n5. Using Established Methods: Cite the creators of methodologies you employ in your survey.\\n6. Supporting Arguments: Cite sources that back up your conclusions and arguments.\\n7. Suggesting Future Research: Reference studies related to proposed future research directions.\\n---\\n\\n</instruction>\\nReturn the content of subsection.txt \"2.1 Early Methods Based on Handcrafted Features\" in the format:\\n<format>\\n[CONTENT OF SUBSECTION]\\n</format>\\nOnly return the content more than 500 words you write for the subsection.txt 2.1 Early Methods Based on Handcrafted Features without any other information:\\n'"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Display the assembled subsection-writing prompt for inspection\n",
    "prompt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T02:54:21.863267Z",
     "start_time": "2025-01-14T02:53:39.849847Z"
    }
   },
   "outputs": [],
   "source": [
    "# Call the ZhipuAI API to draft this single subsection\n",
    "subsection_text = zhipu_api(prompt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'Aspect-Based Sentiment Analysis (ABSA) has evolved significantly over the years, with early methods relying heavily on handcrafted features and traditional machine learning algorithms. These approaches were fundamental in laying the groundwork for the more sophisticated techniques that exist today. In this subsection, we will delve into the initial approaches to ABSA, exploring the use of handcrafted features and their integration with machine learning algorithms.\\n\\nEarly ABSA methods primarily focused on identifying aspect terms and their corresponding sentiment polarities within a given text. Aspect term extraction involved the identification of target entities or topics, while sentiment classification aimed to determine the sentiment expressed towards these aspects. These tasks were often treated separately, with researchers developing dedicated algorithms for each.\\n\\nOne of the earliest techniques for aspect term extraction involved the use of lexicon-based approaches. These methods relied on pre-defined lists of seed aspect terms and employed lexical resources such as dictionaries or ontologies to expand the seed list. For example, the work [Aspect-Based Sentiment Analysis with Explicit Sentiment Augmentations, AAAI2024][1] mentions the use of a lexicon-based approach for aspect term extraction, utilizing a pre-defined list of seed aspect terms to identify relevant aspects in a given text.\\n\\nAnother common approach for aspect term extraction was rule-based systems. These systems relied on manually crafted rules to identify aspect terms based on linguistic patterns or syntactic structures. 
For instance, the work [AX-MABSA: A Framework for Extremely Weakly Supervised Multi-label Aspect Based Sentiment Analysis, EMNLP2022][5] mentions the use of a dependency parser to generate multiple aspect-sentiment labels for each sentence, showcasing the integration of rule-based approaches with machine learning algorithms.\\n\\nSentiment classification in early ABSA methods often involved the use of bag-of-words (BoW) representations, which captured the frequency of occurrence of words in a given text. These representations were then fed into traditional machine learning algorithms such as support vector machines (SVM) or Naive Bayes classifiers to classify the sentiment polarity. The work [Aspect-Based Sentiment Analysis with Explicit Sentiment Augmentations, AAAI2024][1] highlights the use of BoW representations for sentiment classification, demonstrating the effectiveness of this approach in capturing sentiment information.\\n\\nIn addition to BoW representations, researchers also explored the use of n-gram features, which considered sequences of adjacent words, to capture more contextual information. These features were shown to improve the performance of sentiment classification tasks. The work [MvP: Multi-view Prompting Improves Aspect Sentiment Tuple Prediction, ACL2023][1] mentions the use of n-gram features for sentiment classification, showcasing their impact on the performance of ABSA models.\\n\\nTo further enhance the performance of ABSA models, researchers began incorporating additional handcrafted features, such as part-of-speech (POS) tags, named entity recognition (NER) tags, and syntactic parse trees. These features provided richer linguistic information and improved the ability of models to capture the sentiment expressed towards aspects. 
The work [Counterfactual-Enhanced Information Bottleneck for Aspect-Based Sentiment Analysis, AAAI2024][2] mentions the use of syntactic parse trees for sentiment classification, highlighting the importance of syntactic information in ABSA.\\n\\nOverall, the early methods based on handcrafted features and traditional machine learning algorithms played a crucial role in the development of ABSA. These approaches provided a foundation for understanding the key components of ABSA, such as aspect term extraction and sentiment classification. Furthermore, the integration of handcrafted features with machine learning algorithms demonstrated the importance of incorporating linguistic and syntactic information for accurate sentiment analysis. As the field of ABSA progressed, these early methods paved the way for more advanced techniques based on deep learning and pre-trained language models, which have significantly improved the performance and capabilities of ABSA systems.'"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the generated subsection text\n",
    "subsection_text"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T04:25:03.629065Z",
     "start_time": "2025-01-14T03:47:07.816344Z"
    }
   },
   "outputs": [],
   "source": [
    "# Generate the first draft: write every subsection of every section in the outline.\n",
    "section_contents = [[] for _ in range(len(parsed_outline['sections']))]\n",
    "for i in range(len(parsed_outline['sections'])):\n",
    "    for j in range(len(parsed_outline['subsections'][i])):\n",
    "        section = parsed_outline['sections'][i]\n",
    "        subsection = parsed_outline['subsections'][i][j]\n",
    "        description = parsed_outline['subsection_descriptions'][i][j]\n",
    "        # Retrieved paper chunks that ground this subsection's citations\n",
    "        paper_list = section_paper_texts[i][j][0]\n",
    "        # Minimum word count requested from the model (see prompt requirement 1)\n",
    "        subsection_len = 500\n",
    "        prompt = __generate_prompt(SUBSECTION_WRITING_PROMPT,\n",
    "                                   paras={'OVERALL OUTLINE': final_outline_wo_description,\n",
    "                                          'SUBSECTION NAME': subsection,\n",
    "                                          'DESCRIPTION': description, 'TOPIC': topic, 'PAPER LIST': paper_list,\n",
    "                                          'SECTION NAME': section, 'WORD NUM': str(subsection_len)})\n",
    "        subsection_text = zhipu_api(prompt)\n",
    "\n",
    "        section_contents[i].append(subsection_text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T08:03:31.984277Z",
     "start_time": "2025-01-14T08:03:31.976893Z"
    }
   },
   "outputs": [],
   "source": [
    "# Assemble the drafted subsections into a single survey document\n",
    "raw_survey = generate_document(parsed_outline, section_contents)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T05:31:23.790795Z",
     "start_time": "2025-01-14T05:31:23.785398Z"
    }
   },
   "outputs": [],
   "source": [
    "# Persist the raw draft in both markdown and plain-text form.\n",
    "for ext in ('md', 'txt'):\n",
    "    with open(f'doc/raw_survey_{topic}.{ext}', 'w', encoding='utf-8') as f:\n",
    "        f.write(raw_survey)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reload the previously saved raw draft (allows resuming without re-generating)\n",
    "with open(f'doc/raw_survey_{topic}.txt', 'r', encoding='utf-8') as f:\n",
    "    raw_survey = f.read()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T08:32:23.458027Z",
     "start_time": "2025-01-14T08:32:23.421234Z"
    }
   },
   "outputs": [],
   "source": [
    "# Extract citation markers from the full raw survey and strip them out.\n",
    "# BUG FIX: the original call passed `subsection_text`, which only holds the\n",
    "# most recently drafted subsection; the result name `filtered_raw_survey` and\n",
    "# the downstream `replace_citations_with_numbers` step show the intended\n",
    "# input is the whole `raw_survey` loaded above.\n",
    "citations, filtered_raw_survey = extract_citations(raw_survey)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['Vision Transformer Adapters for Generalizable Multitask Learning, ICCV, 2023',\n",
       " '7',\n",
       " 'Learning Multiple Dense Prediction Tasks from Partially Annotated Data, CVPR, 2022',\n",
       " '2',\n",
       " 'Dynamic Neural Network for Multi-Task Learning Searching Across Diverse Network Topologies, CVPR, 2023',\n",
       " '6']"
      ]
     },
     "execution_count": 34,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the extracted citation entries (alternating paper_info / chunk_id)\n",
    "citations"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Replace bracketed citations in the text with numeric reference indices\n",
    "raw_survey_with_references = replace_citations_with_numbers(citations, filtered_raw_survey)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T08:32:24.269848Z",
     "start_time": "2025-01-14T08:32:24.263229Z"
    }
   },
   "outputs": [],
   "source": [
    "# Persist the survey with numbered references in both formats.\n",
    "for ext in ('md', 'txt'):\n",
    "    with open(f'doc/raw_survey_with_references_{topic}.{ext}', 'w', encoding='utf-8') as f:\n",
    "        f.write(raw_survey_with_references)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T07:20:36.704320Z",
     "start_time": "2025-01-14T07:00:12.322066Z"
    }
   },
   "outputs": [],
   "source": [
    "import copy\n",
    "\n",
    "\n",
    "def __refine_subsection(subsections, j):\n",
    "    \"\"\"Refine subsection ``j`` against its neighbours (one LCE step).\n",
    "\n",
    "    Edge positions are padded with '' so a section containing a single\n",
    "    subsection no longer raises IndexError (the original\n",
    "    ``[''] + subsections[:2]`` construction produced only two elements\n",
    "    in that case, then indexed ``contents[2]``).\n",
    "    \"\"\"\n",
    "    previous = subsections[j - 1] if j > 0 else ''\n",
    "    following = subsections[j + 1] if j < len(subsections) - 1 else ''\n",
    "    prompt = __generate_prompt(LCE_PROMPT, paras={'OVERALL OUTLINE': outline, 'PREVIOUS': previous,\n",
    "                                                  'FOLLOWING': following, 'TOPIC': topic,\n",
    "                                                  'SUBSECTION': subsections[j]})\n",
    "    refined = zhipu_api(prompt).replace('<format>', '').replace('</format>', '')\n",
    "    # BUG FIX: str.replace returns a NEW string -- the original code called\n",
    "    # it without assigning the result, so the boilerplate prefix was never\n",
    "    # actually removed.\n",
    "    return refined.replace('Here is the refined subsection:\\n', '')\n",
    "\n",
    "\n",
    "# Pass 1: refine even-indexed subsections against their original neighbours.\n",
    "section_content_even = copy.deepcopy(section_contents)\n",
    "for i in range(len(section_contents)):\n",
    "    for j in range(len(section_contents[i])):\n",
    "        if j % 2 == 0:\n",
    "            section_content_even[i][j] = __refine_subsection(section_contents[i], j)\n",
    "\n",
    "# Pass 2: refine odd-indexed subsections against the already-refined evens,\n",
    "# so every subsection is smoothed against up-to-date surrounding text.\n",
    "final_section_content = copy.deepcopy(section_content_even)\n",
    "for i in range(len(section_content_even)):\n",
    "    for j in range(len(section_content_even[i])):\n",
    "        if j % 2 == 1:\n",
    "            final_section_content[i][j] = __refine_subsection(section_content_even[i], j)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T07:21:42.306816Z",
     "start_time": "2025-01-14T07:21:42.302223Z"
    }
   },
   "outputs": [],
   "source": [
    "# Assemble the refined subsections into the final survey document\n",
    "refined_survey = generate_document(parsed_outline, final_section_content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-14T07:27:15.555406Z",
     "start_time": "2025-01-14T07:27:15.543617Z"
    }
   },
   "outputs": [],
   "source": [
    "# Persist the refined survey in both markdown and plain-text form.\n",
    "for ext in ('md', 'txt'):\n",
    "    with open(f'doc/refined_survey.{ext}', 'w', encoding='utf-8') as f:\n",
    "        f.write(refined_survey)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-09T03:09:36.793750Z",
     "start_time": "2025-01-09T03:09:35.988125Z"
    }
   },
   "outputs": [],
   "source": [
    "# Query ZhipuAI's web-search-pro tool for academic surveys on ABSA.\n",
    "import uuid\n",
    "import requests\n",
    "\n",
    "msg = [\n",
    "    {\n",
    "        \"role\": \"user\",\n",
    "        \"content\": \"search ten academic surveys about Aspect Based Sentiment Analysis\"\n",
    "    }\n",
    "]\n",
    "tool = \"web-search-pro\"\n",
    "url = \"https://open.bigmodel.cn/api/paas/v4/tools\"\n",
    "# Fresh id per call so the request can be traced / deduplicated server-side\n",
    "request_id = str(uuid.uuid4())\n",
    "data = {\n",
    "    \"request_id\": request_id,\n",
    "    \"tool\": tool,\n",
    "    \"stream\": False,\n",
    "    \"messages\": msg\n",
    "}\n",
    "\n",
    "# NOTE(review): `api_key` is hard-coded earlier in this notebook -- move it\n",
    "# to an environment variable or secret store before sharing.\n",
    "resp = requests.post(\n",
    "    url,\n",
    "    json=data,\n",
    "    headers={'Authorization': api_key},\n",
    "    timeout=300\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-09T03:09:37.510519Z",
     "start_time": "2025-01-09T03:09:37.507609Z"
    }
   },
   "outputs": [],
   "source": [
    "import json\n",
    "\n",
    "# Decode the raw HTTP response body into a dict\n",
    "# (resp.json() would be the more idiomatic equivalent)\n",
    "result = json.loads(resp.content.decode())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-09T03:09:37.892037Z",
     "start_time": "2025-01-09T03:09:37.885664Z"
    }
   },
   "outputs": [],
   "source": [
    "# Inspect all tool calls returned by the search tool\n",
    "result['choices'][0]['message']['tool_calls']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-09T03:08:26.915658Z",
     "start_time": "2025-01-09T03:08:26.909828Z"
    }
   },
   "outputs": [],
   "source": [
    "# Drill into the content of one specific search result\n",
    "result['choices'][0]['message']['tool_calls'][1]['search_result'][1]['content']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "raw_survey = '# Comprehensive Survey on Multi-Task Learning: Applications, Challenges, and Future Directions\\n\\n## 1 Introduction to Multi-Task Learning\\n\\n### 1.1 Definition and Importance of Multi-Task Learning\\n\\n<format>\\nMulti-Task Learning (MTL) is a subfield of machine learning that involves the simultaneous learning of multiple related tasks from a shared dataset. Unlike single-task learning, where each task is learned independently, MTL aims to leverage the interdependencies among tasks to improve the learning efficiency and effectiveness. The concept of MTL has gained significant attention in recent years due to its potential to enhance the performance of machine learning models across various domains, including natural language processing, computer vision, and reinforcement learning.\\n\\nAt its core, MTL involves training a single model that can perform multiple tasks simultaneously. This is achieved by sharing representations or parameters across tasks, which allows the model to learn commonalities and differences between the tasks. By doing so, MTL can exploit the shared information among tasks to improve the generalization performance and reduce the amount of labeled data required for training. This is particularly beneficial in scenarios where labeled data is scarce or expensive to obtain.\\n\\nThe importance of MTL can be understood from several perspectives. Firstly, MTL can improve the generalization performance of machine learning models. By learning from multiple tasks, the model can capture more comprehensive and diverse patterns in the data, leading to better generalization to unseen data. This is particularly useful in domains where the data distribution is complex or non-stationary, as MTL can help the model to adapt to different conditions more effectively.\\n\\nSecondly, MTL can reduce the amount of labeled data required for training. 
In single-task learning, each task requires a large amount of labeled data to achieve good performance. However, in MTL, the shared representations or parameters can be leveraged across tasks, allowing the model to learn from a smaller amount of labeled data. This is particularly valuable in scenarios where labeled data is scarce or expensive to obtain, such as in medical diagnosis or autonomous driving.\\n\\nFurthermore, MTL can improve the interpretability of machine learning models. By learning from multiple tasks, the model can capture more comprehensive and diverse patterns in the data, leading to better interpretability of the learned representations. This is particularly useful in domains where interpretability is crucial, such as in medical diagnosis or financial analysis.\\n\\nDespite the benefits of MTL, there are also challenges that need to be addressed. One of the main challenges is the task conflict, where the optimization objectives of different tasks may conflict with each other, leading to suboptimal performance. To mitigate this issue, various strategies have been proposed, such as task-specific loss functions, attention mechanisms, and gradient regularization techniques. Another challenge is the scalability of MTL, as the complexity of the model increases with the number of tasks. To address this, efficient optimization algorithms and model architectures have been developed, such as hierarchical models and distributed training frameworks.\\n\\nIn conclusion, MTL is a powerful approach that can improve the efficiency and effectiveness of machine learning models by leveraging the interdependencies among tasks. Its importance lies in its ability to improve generalization performance, reduce the amount of labeled data required for training, and enhance the interpretability of the learned representations. Despite the challenges, MTL has shown promising results in various domains and continues to be an active area of research. 
With the advancements in machine learning techniques and the availability of large-scale datasets, MTL is expected to play a crucial role in solving complex real-world problems in the future.\\n\\n</format>\\n\\n'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save the sample survey under output/ with a status-tagged filename.\n",
    "output_dir = 'output'\n",
    "status = 'raw'\n",
    "title = 'Multi-task Learning'\n",
    "output_path = f\"{output_dir}/survey_{status}_{title}\"\n",
    "\n",
    "for ext in ('md', 'txt'):\n",
    "    with open(f'{output_path}.{ext}', 'w', encoding='utf-8') as f:\n",
    "        f.write(raw_survey)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "找到年份: 2024\n",
      "未找到年份\n",
      "找到年份: 1995\n",
      "找到年份: 2010\n",
      "找到年份: 1995\n"
     ]
    }
   ],
   "source": [
    "import re\n",
    "\n",
    "def extract_year(text):\n",
    "    \"\"\"Return the first 4-digit year (1900-2099) found in ``text``, else None.\"\"\"\n",
    "    match = re.search(r'(19|20)\\d{2}', text)  # only matches 4-digit numbers in 1900-2099\n",
    "    return match.group(0) if match else None\n",
    "\n",
    "# Smoke-test examples (Chinese strings are deliberate test data)\n",
    "texts = [\"今天是2024年\", \"Hello world!\", \"我的生日是1995年\", \"事件发生在2010abc\", \"版本号 v1995r\"]\n",
    "for text in texts:\n",
    "    year = extract_year(text)\n",
    "    if year:\n",
    "        print(f\"找到年份: {year}\")\n",
    "    else:\n",
    "        print(\"未找到年份\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "这是一段  的文本， 哦。\n"
     ]
    }
   ],
   "source": [
    "import re\n",
    "\n",
    "def remove_square_brackets(text):\n",
    "    \"\"\"Strip every '[...]' span (brackets included, non-greedy) from ``text``.\"\"\"\n",
    "    # Regex matching a square-bracket pair and its contents (non-greedy,\n",
    "    # so adjacent bracket pairs are removed independently)\n",
    "    pattern = r'\\[.*?\\]'\n",
    "    # Replace every match with the empty string\n",
    "    result = re.sub(pattern, '', text)\n",
    "    return result\n",
    "\n",
    "# Smoke test (Chinese string is deliberate test data)\n",
    "text = \"这是一段 [包含方括号] 的文本，[里面有内容] 哦。\"\n",
    "cleaned_text = remove_square_brackets(text)\n",
    "print(cleaned_text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "agent",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
