{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "d:\\gitRepo\\CloseAI\\deserted\\prompt.py:65: SyntaxWarning: invalid escape sequence '\\o'\n",
      "  CHECK_ROUGH_OUTLINE_PROMPT = '''\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "\n",
    "from prompt import *\n",
    "from zhipuai import ZhipuAI\n",
    "from rag import *\n",
    "\n",
    "# Security: never hardcode API keys in a notebook — they leak via version\n",
    "# control and shared outputs. Read the key from the environment instead,\n",
    "# e.g. `export ZHIPUAI_API_KEY=...` before launching Jupyter.\n",
    "api_key = os.environ[\"ZHIPUAI_API_KEY\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def __generate_prompt(template, paras):\n",
    "    \"\"\"Fill a prompt template by substituting ``[KEY]`` placeholders.\n",
    "\n",
    "    Args:\n",
    "        template (str): Prompt text containing ``[KEY]``-style placeholders.\n",
    "        paras (dict): Maps placeholder name to replacement value; values are\n",
    "            coerced with ``str()`` so non-string parameters are accepted too.\n",
    "\n",
    "    Returns:\n",
    "        str: The template with every listed placeholder replaced.\n",
    "    \"\"\"\n",
    "    prompt = template\n",
    "    for key, value in paras.items():\n",
    "        prompt = prompt.replace(f'[{key}]', str(value))\n",
    "    return prompt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def do_rag_simple(keyword, top_k=30):\n",
    "    \"\"\"Run a vector search for ``keyword`` and flatten the hits into dicts.\n",
    "\n",
    "    Each returned dict carries the paper id/title, the chunk id/text and the\n",
    "    original filename, ready for downstream prompt assembly.\n",
    "\n",
    "    Args:\n",
    "        keyword (str): Search query passed to the retriever.\n",
    "        top_k (int): Maximum number of hits to return (default 30).\n",
    "\n",
    "    Returns:\n",
    "        list[dict]: One dict per retrieved chunk, in retriever order.\n",
    "    \"\"\"\n",
    "    hits = search_papers(query=keyword, top_k=top_k)\n",
    "    return [\n",
    "        {\n",
    "            'original_filename': hit['entity']['original_filename'],\n",
    "            'paper_id': hit['id'],\n",
    "            'paper_title': hit['entity']['paper_title'],\n",
    "            'chunk_id': hit['entity']['chunk_id'],\n",
    "            'chunk': hit['entity']['chunk_text'],\n",
    "        }\n",
    "        for hit in hits\n",
    "    ]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def consolidate_rag_result(rag_result, kind=\"all\", max_chars=100000):\n",
    "    \"\"\"Pack formatted RAG hits into strings of at most ``max_chars`` chars.\n",
    "\n",
    "    Args:\n",
    "        rag_result (list[dict]): Entries as returned by ``do_rag_simple``.\n",
    "        kind (str): Which fields to include per paper: ``'simple'`` uses the\n",
    "            chunk fields; ``'abstract'``/``'introduction'``/``'related_works'``\n",
    "            use that single field; anything else concatenates all three.\n",
    "        max_chars (int): Size budget per output string (default 100,000).\n",
    "\n",
    "    Returns:\n",
    "        list[str]: Concatenated paper blocks; a single oversized block still\n",
    "        becomes its own string rather than being split.\n",
    "    \"\"\"\n",
    "    def _format(entry):\n",
    "        # One text block per retrieved paper, shaped by `kind`.\n",
    "        if kind == \"simple\":\n",
    "            return (\n",
    "                f\"paper_title: {entry['paper_title']}\\n\"\n",
    "                f\"paper_metainfo: {entry['original_filename']}\\n\"\n",
    "                f\"chunk_id: {entry['chunk_id']}\\n\"\n",
    "                f\"chunk_content: {entry['chunk']}\\n\"\n",
    "            )\n",
    "        fields = {\n",
    "            'abstract': ['abstract'],\n",
    "            'introduction': ['introduction'],\n",
    "            'related_works': ['related_works'],\n",
    "        }.get(kind, ['abstract', 'introduction', 'related_works'])\n",
    "        parts = [f\"paper_title: {entry['paper_title']}\\n\"]\n",
    "        parts.extend(f\"{entry[field]}\\n\" for field in fields)\n",
    "        return ''.join(parts)\n",
    "\n",
    "    related_paper_list = []\n",
    "    cur_content = \"\"\n",
    "    for entry in rag_result:\n",
    "        content = _format(entry)\n",
    "        # Start a new bucket once appending would exceed the budget. Guarding\n",
    "        # on a non-empty cur_content fixes the original bug where an oversized\n",
    "        # first block caused an empty string to be emitted into the result.\n",
    "        if cur_content and len(cur_content) + len(content) > max_chars:\n",
    "            related_paper_list.append(cur_content)\n",
    "            cur_content = content\n",
    "        else:\n",
    "            cur_content += content\n",
    "    # Flush the final, partially-filled bucket.\n",
    "    if cur_content:\n",
    "        related_paper_list.append(cur_content)\n",
    "    return related_paper_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def extract_title_sections_descriptions(outline):\n",
    "    \"\"\"Parse an LLM-generated outline into title, sections and descriptions.\n",
    "\n",
    "    Expects lines shaped like ``Title: ...``, ``Section N: ...`` and\n",
    "    ``Description N: ...``. Missing pieces are skipped instead of raising:\n",
    "    the original crashed with IndexError when 'Title: ' was absent, or when\n",
    "    'Section N' appeared without the exact 'Section N: ' / 'Description N: '\n",
    "    forms (the membership check did not match the split token).\n",
    "\n",
    "    Returns:\n",
    "        tuple[str, list[str], list[str]]: (title, section names, descriptions);\n",
    "        title is '' when no 'Title: ' line is present.\n",
    "    \"\"\"\n",
    "    title = ''\n",
    "    if 'Title: ' in outline:\n",
    "        title = outline.split('Title: ')[1].split('\\n')[0]\n",
    "    sections, descriptions = [], []\n",
    "    for i in range(100):\n",
    "        section_tag = f'Section {i + 1}: '\n",
    "        description_tag = f'Description {i + 1}: '\n",
    "        if section_tag in outline and description_tag in outline:\n",
    "            sections.append(outline.split(section_tag)[1].split('\\n')[0])\n",
    "            descriptions.append(outline.split(description_tag)[1].split('\\n')[0])\n",
    "    return title, sections, descriptions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def zhipu_api(prompt, model='glm-4-plus'):\n",
    "    \"\"\"Send ``prompt`` to the ZhipuAI chat-completions API and return the reply.\n",
    "\n",
    "    Args:\n",
    "        prompt (str): User message to send.\n",
    "        model (str): Model name to call (defaults to 'glm-4-plus').\n",
    "\n",
    "    Returns:\n",
    "        str: Content of the first choice of the non-streaming response.\n",
    "    \"\"\"\n",
    "    client = ZhipuAI(api_key=api_key)\n",
    "    messages = [\n",
    "        {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
    "        {\"role\": \"user\", \"content\": prompt},\n",
    "    ]\n",
    "    response = client.chat.completions.create(\n",
    "        model=model,\n",
    "        messages=messages,\n",
    "        stream=False,\n",
    "    )\n",
    "    return response.choices[0].message.content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Survey topic; first retrieval pass pulls the top-k chunks for this topic.\n",
    "topic = 'Aspect Based Sentiment Analysis'\n",
    "paper_id2chunks_list = do_rag_simple(topic)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<format>\n",
      "Title: A Comprehensive Survey on Aspect-Based Sentiment Analysis\n",
      "Section 1: Introduction to Aspect-Based Sentiment Analysis\n",
      "Description 1: Provide an overview of Aspect-Based Sentiment Analysis (ABSA), its significance, and its applications in various domains.\n",
      "\n",
      "Section 2: Evolution and Challenges in ABSA\n",
      "Description 2: Discuss the evolution of ABSA techniques, from early approaches to recent advancements, highlighting the challenges faced in handling implicit sentiment and mitigating spurious correlations.\n",
      "\n",
      "Section 3: State-of-the-Art Methods and Frameworks\n",
      "Description 3: Present a detailed analysis of recent state-of-the-art methods and frameworks in ABSA, including generative approaches, attention mechanisms, and graph-based models.\n",
      "\n",
      "Section 4: Data Augmentation and Domain Adaptation\n",
      "Description 4: Explore the role of data augmentation and domain adaptation techniques in ABSA, focusing on methods like explicit sentiment augmentations, cross-domain data augmentation, and domain-adaptive language modeling.\n",
      "\n",
      "Section 5: Future Directions and Ethical Considerations\n",
      "Description 5: Discuss potential future directions for ABSA research, addressing limitations of current approaches and highlighting the importance of ethical considerations in sentiment analysis tasks.\n",
      "</format>\n",
      "<format>\n",
      "Title: A Comprehensive Survey on Aspect-Based Sentiment Analysis\n",
      "\n",
      "Section 1: Introduction\n",
      "Description 1: Provide an overview of Aspect-Based Sentiment Analysis (ABSA), its significance, and its applications in various domains.\n",
      "\n",
      "Section 2: Related Work and Challenges\n",
      "Description 2: Discuss the evolution of ABSA, highlighting the key milestones and the existing challenges in the field.\n",
      "\n",
      "Section 3: Methods and Techniques\n",
      "Description 3: Explore the different approaches and methodologies employed in ABSA, including context-based, syntax-based, and knowledge-based methods.\n",
      "\n",
      "Section 4: Data Augmentation and Domain Adaptation\n",
      "Description 4: Investigate the role of data augmentation and domain adaptation techniques in improving the performance and robustness of ABSA models.\n",
      "\n",
      "Section 5: Future Directions and Research Opportunities\n",
      "Description 5: Identify potential research avenues and future directions for advancing ABSA, considering the limitations of current methods and the emerging trends in the field.\n",
      "</format>\n"
     ]
    }
   ],
   "source": [
    "# Draft one rough outline per consolidated paper bucket, then show them all.\n",
    "section_num = 5\n",
    "outlines = []\n",
    "outline_rag_result = consolidate_rag_result(paper_id2chunks_list, kind='simple')\n",
    "for paper_list in outline_rag_result:\n",
    "    prompt = __generate_prompt(\n",
    "        ROUGH_OUTLINE_PROMPT,\n",
    "        paras={'PAPER LIST': paper_list, 'TOPIC': topic, 'SECTION NUM': str(section_num)},\n",
    "    )\n",
    "    outlines.append(zhipu_api(prompt))\n",
    "# Print every draft instead of hardcoding outlines[0]/outlines[1], which\n",
    "# raised IndexError whenever retrieval produced fewer than two buckets.\n",
    "for draft in outlines:\n",
    "    print(draft)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<format>\n",
      "Title: A Comprehensive Survey on Aspect-Based Sentiment Analysis\n",
      "\n",
      "Section 1: Introduction to Aspect-Based Sentiment Analysis\n",
      "Description 1: Provide an overview of Aspect-Based Sentiment Analysis (ABSA), its significance, and its applications in various domains.\n",
      "\n",
      "Section 2: Evolution and Key Milestones in ABSA\n",
      "Description 2: Discuss the evolution of ABSA techniques, highlighting key milestones and the existing challenges in the field, including handling implicit sentiment and mitigating spurious correlations.\n",
      "\n",
      "Section 3: State-of-the-Art Methods and Frameworks\n",
      "Description 3: Present a detailed analysis of recent state-of-the-art methods and frameworks in ABSA, including context-based, syntax-based, knowledge-based methods, generative approaches, attention mechanisms, and graph-based models.\n",
      "\n",
      "Section 4: Data Augmentation and Domain Adaptation\n",
      "Description 4: Explore the role of data augmentation and domain adaptation techniques in improving the performance and robustness of ABSA models, focusing on methods like explicit sentiment augmentations, cross-domain data augmentation, and domain-adaptive language modeling.\n",
      "\n",
      "Section 5: Future Directions and Ethical Considerations\n",
      "Description 5: Discuss potential future directions for ABSA research, addressing limitations of current approaches, highlighting emerging trends in the field, and emphasizing the importance of ethical considerations in sentiment analysis tasks.\n",
      "</format>\n"
     ]
    }
   ],
   "source": [
    "# Merge the per-bucket draft outlines into one consolidated survey outline.\n",
    "prompt = __generate_prompt(MERGING_OUTLINE_PROMPT, paras={'TOPIC': topic, 'OUTLINE LIST': '\\n'.join(outlines)})\n",
    "outline = zhipu_api(prompt)\n",
    "print(outline)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "A Comprehensive Survey on Aspect-Based Sentiment Analysis\n",
      "\n",
      "Section Name: Introduction to Aspect-Based Sentiment Analysis\n",
      "Description: Provide an overview of Aspect-Based Sentiment Analysis (ABSA), its significance, and its applications in various domains.\n",
      "\n",
      "\n",
      "Section Name: Evolution and Key Milestones in ABSA\n",
      "Description: Discuss the evolution of ABSA techniques, highlighting key milestones and the existing challenges in the field, including handling implicit sentiment and mitigating spurious correlations.\n",
      "\n",
      "\n",
      "Section Name: State-of-the-Art Methods and Frameworks\n",
      "Description: Present a detailed analysis of recent state-of-the-art methods and frameworks in ABSA, including context-based, syntax-based, knowledge-based methods, generative approaches, attention mechanisms, and graph-based models.\n",
      "\n",
      "\n",
      "Section Name: Data Augmentation and Domain Adaptation\n",
      "Description: Explore the role of data augmentation and domain adaptation techniques in improving the performance and robustness of ABSA models, focusing on methods like explicit sentiment augmentations, cross-domain data augmentation, and domain-adaptive language modeling.\n",
      "\n",
      "\n",
      "Section Name: Future Directions and Ethical Considerations\n",
      "Description: Discuss potential future directions for ABSA research, addressing limitations of current approaches, highlighting emerging trends in the field, and emphasizing the importance of ethical considerations in sentiment analysis tasks.\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Parse the merged outline into the survey title plus per-section names and descriptions.\n",
    "survey_title, survey_sections, survey_section_descriptions = extract_title_sections_descriptions(outline)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<format>\n",
      "Subsection 1: Context-Based Methods\n",
      "Description 1: Discuss the evolution and effectiveness of context-based methods, focusing on neural networks like CNNs and LSTMs, and attention mechanisms for capturing contextual features in ABSA.\n",
      "\n",
      "Subsection 2: Syntax-Based Methods\n",
      "Description 2: Analyze the role of syntax-based methods in ABSA, including the use of dependency trees, graph neural networks, and GCNs for encoding syntactic information and modeling aspect-opinion relationships.\n",
      "\n",
      "Subsection 3: Knowledge-Based Methods\n",
      "Description 3: Explore the integration of external knowledge in ABSA, such as linguistic and commonsense knowledge, and discuss the benefits and challenges of incorporating knowledge graphs for enhanced sentiment analysis.\n",
      "\n",
      "Subsection 4: Generative Approaches\n",
      "Description 4: Examine generative models in ABSA, including sequence-to-sequence models and pre-trained language models like BERT and RoBERTa, and their effectiveness in generating sentiment tuples and handling implicit sentiment.\n",
      "\n",
      "Subsection 5: Attention Mechanisms\n",
      "Description 5: Delve into the development and applications of attention mechanisms in ABSA, highlighting their role in capturing long-distance dependencies and focusing on relevant opinion words for aspect sentiment classification.\n",
      "\n",
      "Subsection 6: Graph-Based Models\n",
      "Description 6: Discuss the use of graph-based models in ABSA, focusing on graph neural networks like GCNs and GATs, and their ability to model complex relationships between aspects and opinion words in a sentence.\n",
      "\n",
      "Subsection 7: Counterfactual Data Augmentation\n",
      "Description 7: Explore the use of counterfactual data augmentation techniques in ABSA, including the generation of adversarial samples and the use of large language models to improve model robustness and mitigate spurious correlations.\n",
      "\n",
      "Subsection 8: Ensemble Learning\n",
      "Description 8: Discuss the role of ensemble learning techniques in ABSA, including the use of bias-only models and the combination of multiple models to improve performance and reduce spurious correlations.\n",
      "\n",
      "Subsection 9: Domain Adaptation\n",
      "Description 9: Analyze the importance of domain adaptation techniques in ABSA, focusing on methods like domain-adaptive pseudo labeling and language modeling for generating labeled data in target domains and improving cross-domain sentiment analysis performance.\n",
      "\n",
      "Subsection 10: Multi-View Learning\n",
      "Description 10: Explore the use of multi-view learning techniques in ABSA, including the integration of context, syntax, and knowledge-based representations for enhanced sentiment analysis and improved performance on various ABSA tasks.\n",
      "</format>\n",
      "<format>\n",
      "Subsection 1: Context-Based Methods\n",
      "Description 1: Discuss the role of context in ABSA, including how context-aware representation improves language understanding and the use of bidirectional LSTMs to capture statistical dependencies in sentences and aspects.\n",
      "\n",
      "Subsection 2: Syntax-Based Methods\n",
      "Description 2: Explore the use of syntactic information in ABSA, focusing on leveraging explicit syntactic structures through graph convolution networks and attention mechanisms to establish connections between aspects and opinion words.\n",
      "\n",
      "Subsection 3: Knowledge-Based Methods\n",
      "Description 3: Examine the integration of external knowledge into ABSA through knowledge graphs, discussing how knowledge graph embeddings and soft attention mechanisms contribute to aspect-specific knowledge representation.\n",
      "\n",
      "Subsection 4: Generative Approaches\n",
      "Description 4: Analyze generative models in ABSA, including their ability to mitigate error propagation in pipeline methods and exploit label semantic information, with a focus on sentiment element sequence, natural language, and structured extraction schema as generative targets.\n",
      "\n",
      "Subsection 5: Attention Mechanisms\n",
      "Description 5: Delve into the application of attention mechanisms in ABSA, highlighting their global modeling capability and effectiveness in capturing long-distance dependencies between aspects and context, as well as addressing challenges in multi-aspect sentences.\n",
      "\n",
      "Subsection 6: Graph-Based Models\n",
      "Description 6: Discuss the use of graph-based models in ABSA, focusing on their ability to represent complex relationships and interactions between aspects, opinion words, and sentiment polarities, and their effectiveness in capturing nuanced sentiment information.\n",
      "\n",
      "Subsection 7: Vision-Language Pre-Training for Multimodal ABSA\n",
      "Description 7: Explore the role of vision-language pre-training in multimodal ABSA, discussing the importance of capturing crossmodal alignment between text and image, and the use of task-specific pre-training tasks to identify fine-grained aspects, opinions, and their alignments.\n",
      "\n",
      "Subsection 8: Weakly Supervised Learning in ABSA\n",
      "Description 8: Examine weakly supervised learning approaches in ABSA, focusing on methods that require minimal labeled data, such as using a single word per class as seed information, unsupervised language model post-training, and multi-label generator models for aspect category-sentiment pair extraction.\n",
      "\n",
      "Subsection 9: Prompt Learning for ABSA\n",
      "Description 9: Discuss the application of prompt learning in ABSA, focusing on element order-based prompt learning methods that improve aspect-level opinion information prediction through multi-view results aggregation and their effectiveness in generative tuple prediction tasks.\n",
      "\n",
      "</format>\n",
      "<format>\n",
      "Subsection 1: Context-Based Methods\n",
      "Description 1: Discuss the role of context in ABSA, including context-aware models and their ability to capture nuanced sentiment expressions.\n",
      "\n",
      "Subsection 2: Syntax-Based Methods\n",
      "Description 2: Explore syntax-driven approaches that leverage grammatical structures to identify and analyze aspect-related sentiments.\n",
      "\n",
      "Subsection 3: Knowledge-Based Methods\n",
      "Description 3: Examine the integration of external knowledge sources, such as ontologies and lexicons, to enhance ABSA performance.\n",
      "\n",
      "Subsection 4: Generative Approaches\n",
      "Description 4: Analyze generative models that generate aspect-specific sentiment descriptions, enhancing the interpretability and depth of analysis.\n",
      "\n",
      "Subsection 5: Attention Mechanisms\n",
      "Description 5: Detail the use of attention mechanisms to focus on relevant aspects and sentiments, improving model accuracy and context understanding.\n",
      "\n",
      "Subsection 6: Graph-Based Models\n",
      "Description 6: Investigate graph-based frameworks that model relationships between aspects and sentiments, facilitating complex sentiment analysis.\n",
      "\n",
      "Subsection 7: Multi-Modal Aspect-Based Sentiment Analysis\n",
      "Description 7: Present an in-depth look at multi-modal approaches, such as DQPSA, that integrate image and text data for enhanced sentiment analysis, addressing challenges like semantic alignment and modal gap.\n",
      "\n",
      "Subsection 8: Energy-Based Models\n",
      "Description 8: Discuss the application of Energy-Based Models in ABSA, focusing on their ability to model pairwise stability and improve span-based extraction methods.\n",
      "\n",
      "Subsection 9: Prompt-Based Techniques\n",
      "Description 9: Explore the use of prompt-based techniques, like Prompt as Dual Query, to guide models in focusing on relevant visual and textual information.\n",
      "\n",
      "Subsection 10: Benchmarking and Performance Evaluation\n",
      "Description 10: Evaluate the performance of state-of-the-art methods using benchmark datasets, highlighting their strengths and limitations.\n",
      "</format>\n"
     ]
    }
   ],
   "source": [
    "query = f\"topic: {topic}\\nsection name: {survey_sections[2]}\\nsection description: {survey_section_descriptions[2]}.\"\n",
    "simple_rag_result = do_rag_simple(query)\n",
    "sub_outlines = []\n",
    "subsection_rag_result = consolidate_rag_result(simple_rag_result, kind='simple')\n",
    "# One subsection outline per consolidated paper bucket for section 3.\n",
    "for paper_list in subsection_rag_result:\n",
    "    prompt = __generate_prompt(\n",
    "        SUBSECTION_OUTLINE_PROMPT,\n",
    "        paras={\n",
    "            'OVERALL OUTLINE': outline,\n",
    "            'SECTION NAME': survey_sections[2],\n",
    "            'SECTION DESCRIPTION': survey_section_descriptions[2],\n",
    "            'TOPIC': topic,\n",
    "            'PAPER LIST': paper_list,\n",
    "        },\n",
    "    )\n",
    "    sub_outline = zhipu_api(prompt)\n",
    "    sub_outlines.append(sub_outline)\n",
    "    print(sub_outline)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<format>\n",
      "Subsection 1: Context and Syntax-Integrated Methods\n",
      "Description 1: Explore the synergy of context-based and syntax-based approaches in ABSA, discussing how neural networks like CNNs, LSTMs, and attention mechanisms capture contextual features, and how dependency trees and graph neural networks encode syntactic information to model aspect-opinion relationships.\n",
      "\n",
      "Subsection 2: Knowledge and Generative Approaches\n",
      "Description 2: Analyze the integration of external knowledge and generative models in ABSA, focusing on the use of knowledge graphs, linguistic databases, and generative models like BERT and RoBERTa to enhance sentiment analysis, mitigate error propagation, and generate sentiment tuples.\n",
      "\n",
      "Subsection 3: Advanced Modeling Techniques\n",
      "Description 3: Delve into advanced techniques such as attention mechanisms, graph-based models, and multi-modal approaches, highlighting their roles in capturing long-distance dependencies, modeling complex relationships, and integrating text and image data for nuanced sentiment analysis.\n",
      "\n",
      "Subsection 4: Learning and Adaptation Strategies\n",
      "Description 4: Discuss strategies like weakly supervised learning, prompt learning, domain adaptation, and ensemble learning, emphasizing their contributions to improving model robustness, reducing spurious correlations, and enhancing performance across different domains and tasks.\n",
      "</format>\n",
      "<format>\n",
      "Subsection 1: Context and Syntax-Integrated Methods\n",
      "Description 1: Explore the synergy of context-based and syntax-based approaches in ABSA, discussing how neural networks like CNNs, LSTMs, and attention mechanisms capture contextual features, and how dependency trees and graph neural networks encode syntactic information to model aspect-opinion relationships.\n",
      "\n",
      "Subsection 2: Knowledge and Generative Approaches\n",
      "Description 2: Analyze the integration of external knowledge and generative models in ABSA, focusing on the use of knowledge graphs, linguistic databases, and generative models like BERT and RoBERTa to enhance sentiment analysis, mitigate error propagation, and generate sentiment tuples.\n",
      "\n",
      "Subsection 3: Advanced Modeling Techniques\n",
      "Description 3: Delve into advanced techniques such as attention mechanisms, graph-based models, and multi-modal approaches, highlighting their roles in capturing long-distance dependencies, modeling complex relationships, and integrating text and image data for nuanced sentiment analysis.\n",
      "\n",
      "Subsection 4: Learning and Adaptation Strategies\n",
      "Description 4: Discuss strategies like weakly supervised learning, prompt learning, domain adaptation, and ensemble learning, emphasizing their contributions to improving model robustness, reducing spurious correlations, and enhancing performance across different domains and tasks.\n",
      "</format>\n"
     ]
    }
   ],
   "source": [
    "keywords = ['future', 'conclusion']\n",
    "# NOTE(review): this call uses the 'SECTION_NAME' key (underscore) while the\n",
    "# other prompts use 'SECTION NAME' — confirm it matches the placeholder in\n",
    "# MERGING_SUBSECTION_PROMPT.\n",
    "prompt = __generate_prompt(\n",
    "    MERGING_SUBSECTION_PROMPT,\n",
    "    paras={'TOPIC': topic, 'SECTION_NAME': survey_sections[2], 'OUTLINE LIST': '\\n'.join(sub_outlines)},\n",
    ")\n",
    "outline = zhipu_api(prompt)\n",
    "print(outline)\n",
    "\n",
    "# Drop a trailing future-work/conclusion subsection unless the section itself\n",
    "# is about those topics.\n",
    "outline_copy = outline.replace('<format>', '').replace('</format>', '')\n",
    "outline_list = outline_copy.split(\"\\n\\n\")\n",
    "section_has_keyword = any(k in survey_sections[2] for k in keywords)\n",
    "last_block_has_keyword = any(k in outline_list[-1] for k in keywords)\n",
    "if not section_has_keyword and last_block_has_keyword:\n",
    "    outline = \"<format>\\n\" + \"\\n\\n\".join(outline_list[:-1]) + \"\\n</format>\"\n",
    "print(outline)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<format>\n",
      "Subsection 1: Early Approaches and Attention Mechanisms\n",
      "Description 1: Discuss the initial development of ABSA techniques, focusing on early neural network approaches and the introduction of attention mechanisms for capturing aspect-opinion relationships.\n",
      "\n",
      "Subsection 2: Syntax-Aware and Graph-Based Models\n",
      "Description 2: Explore the integration of syntactic knowledge and graph neural networks in ABSA, highlighting their role in modeling complex sentence structures and improving sentiment analysis accuracy.\n",
      "\n",
      "Subsection 3: Pre-Trained Language Models and External Knowledge\n",
      "Description 3: Analyze the impact of pre-trained language models like BERT and RoBERTa on ABSA, and discuss the use of external knowledge from resources like knowledge graphs and linguistic databases to enhance sentiment understanding.\n",
      "\n",
      "Subsection 4: Implicit Sentiment and Data Augmentation\n",
      "Description 4: Examine the challenges of handling implicit sentiment in ABSA and explore data augmentation techniques, such as generating counterfactual examples and using explicit sentiment augmentations, to improve model performance.\n",
      "\n",
      "Subsection 5: Weakly Supervised and Unsupervised Learning\n",
      "Description 5: Discuss weakly supervised and unsupervised learning approaches for ABSA, including methods like domain adaptation, clustering, and automatic surface word selection, which enable sentiment analysis with limited or no labeled data.\n",
      "\n",
      "Subsection 6: Cross-Domain Adaptation and Data Generation\n",
      "Description 6: Explore cross-domain adaptation techniques and data generation methods for ABSA, focusing on the use of domain-adaptive language models and strategies for generating labeled data in target domains.\n",
      "\n",
      "Subsection 7: Multi-Task and Compound ABSA Tasks\n",
      "Description 7: Analyze multi-task learning and compound ABSA tasks, such as aspect-opinion pair extraction and aspect-sentiment-opinion triplet extraction, and discuss the benefits of solving multiple related tasks simultaneously.\n",
      "\n",
      "Subsection 8: Advanced Models and Model Aggregation\n",
      "Description 8: Discuss advanced ABSA models, such as span-level bidirectional networks and multi-view prompting methods, and explore strategies for aggregating predictions from multiple models or viewpoints to improve sentiment analysis performance.\n",
      "</format>\n",
      "<format>\n",
      "Subsection 1: Early Methods and Handcrafted Features\n",
      "Description 1: Discuss the initial approaches to ABSA, focusing on the use of handcrafted features and the limitations of these methods in capturing intrinsic semantic associations between aspects and context.\n",
      "\n",
      "Subsection 2: Neural Network-Based Approaches\n",
      "Description 2: Explore the transition to neural network-based methods, such as CNNs, RNNs, and memory networks, and their ability to model semantic relations between aspects and context.\n",
      "\n",
      "Subsection 3: Attention Mechanisms and Long-Distance Dependencies\n",
      "Description 3: Analyze the introduction of attention mechanisms in ABSA, their effectiveness in capturing long-distance semantic features, and the limitations of these mechanisms in differentiating correlations between contextual words and aspects.\n",
      "\n",
      "Subsection 4: Position Information and Proximity Strategies\n",
      "Description 4: Examine the integration of position information and proximity strategies in ABSA models, their impact on capturing relevant opinion words, and the challenges they face in scenarios with complex semantic information.\n",
      "\n",
      "Subsection 5: Graph Neural Networks and Syntactic Structures\n",
      "Description 5: Discuss the use of graph neural networks for modeling syntactic structures in ABSA, their effectiveness in handling aspect-level sentiment classification tasks, and the issues related to noise introduced through dependency trees.\n",
      "\n",
      "Subsection 6: Advanced Techniques and Multi-Modal Approaches\n",
      "Description 6: Explore advanced techniques and multi-modal approaches in ABSA, such as neighboring span enhanced modules, multi-perspective attention mechanisms, and energy-based models, and their roles in capturing comprehensive sentiment information.\n",
      "\n",
      "Subsection 7: Learning and Adaptation Strategies\n",
      "Description 7: Discuss learning and adaptation strategies in ABSA, including semi-supervised learning, domain adaptation, and ensemble learning, and their contributions to improving model robustness and performance across different domains and tasks.\n",
      "\n",
      "Subsection 8: Challenges and Future Directions\n",
      "Description 8: Highlight the existing challenges in ABSA, such as handling implicit sentiment and mitigating spurious correlations, and discuss potential future directions for research in this field.\n",
      "</format>\n",
      "<format>\n",
      "Subsection 1: Context-Based Methods\n",
      "Description 1: Discuss the evolution of context-based methods in ABSA, from early LSTM and CNN approaches to attention mechanisms and the integration of pre-trained language models like BERT and RoBERTa for enhanced feature extraction and sentiment understanding.\n",
      "\n",
      "Subsection 2: Syntax-Based Methods\n",
      "Description 2: Explore the development of syntax-based methods, focusing on the utilization of dependency trees and graph neural networks to capture syntactic relationships and improve the alignment of aspects with their corresponding sentiment expressions.\n",
      "\n",
      "Subsection 3: Knowledge-Based Methods\n",
      "Description 3: Analyze the integration of external knowledge in ABSA, including the use of knowledge graphs and linguistic databases to augment semantic features and enhance sentiment analysis performance.\n",
      "\n",
      "Subsection 4: Generative Approaches\n",
      "Description 4: Examine the role of generative models in ABSA, discussing how models like BERT and RoBERTa are used for data augmentation and the generation of sentiment tuples, and how these approaches contribute to mitigating spurious correlations.\n",
      "\n",
      "Subsection 5: Attention Mechanisms\n",
      "Description 5: Delve into the application of attention mechanisms in ABSA, highlighting their effectiveness in capturing long-distance dependencies and focusing on relevant opinion words for accurate sentiment prediction.\n",
      "\n",
      "Subsection 6: Graph-Based Models\n",
      "Description 6: Explore the use of graph-based models in ABSA, discussing how graph neural networks and other graph-based approaches are employed to model complex relationships and improve sentiment analysis performance.\n",
      "\n",
      "Subsection 7: Advanced Techniques and Frameworks\n",
      "Description 7: Present a detailed analysis of recent state-of-the-art methods and frameworks in ABSA, including context-based, syntax-based, knowledge-based methods, generative approaches, attention mechanisms, and graph-based models.\n",
      "</format>\n",
      "<format>\n",
      "Subsection 1: Context and Syntax-Integrated Methods\n",
      "Description 1: Explore the synergy of context-based and syntax-based approaches in ABSA, discussing how neural networks like CNNs, LSTMs, and attention mechanisms capture contextual features, and how dependency trees and graph neural networks encode syntactic information to model aspect-opinion relationships.\n",
      "\n",
      "Subsection 2: Knowledge and Generative Approaches\n",
      "Description 2: Analyze the integration of external knowledge and generative models in ABSA, focusing on the use of knowledge graphs, linguistic databases, and generative models like BERT and RoBERTa to enhance sentiment analysis, mitigate error propagation, and generate sentiment tuples.\n",
      "\n",
      "Subsection 3: Advanced Modeling Techniques\n",
      "Description 3: Delve into advanced techniques such as attention mechanisms, graph-based models, and multi-modal approaches, highlighting their roles in capturing long-distance dependencies, modeling complex relationships, and integrating text and image data for nuanced sentiment analysis.\n",
      "\n",
      "Subsection 4: Learning and Adaptation Strategies\n",
      "Description 4: Discuss strategies like weakly supervised learning, prompt learning, domain adaptation, and ensemble learning, emphasizing their contributions to improving model robustness, reducing spurious correlations, and enhancing performance across different domains and tasks.\n",
      "\n",
      "Subsection 5: State-of-the-Art Methods and Frameworks\n",
      "Description 5: Present a detailed analysis of recent state-of-the-art methods and frameworks in ABSA, including context-based, syntax-based, knowledge-based methods, generative approaches, attention mechanisms, and graph-based models.\n",
      "</format>\n",
      "<format>\n",
      "Subsection 1: Context and Syntax-Integrated Approaches\n",
      "Description 1: Examine the integration of context-based and syntax-based methods in ABSA, focusing on how neural networks like CNNs, LSTMs, and attention mechanisms capture contextual nuances, and how dependency trees and graph neural networks encode syntactic structures to model aspect-opinion relationships effectively.\n",
      "\n",
      "Subsection 2: Knowledge-Enhanced and Generative Models\n",
      "Description 2: Analyze the role of external knowledge sources and generative models in ABSA, detailing the use of knowledge graphs, linguistic databases, and advanced models like BERT and RoBERTa to enhance sentiment analysis accuracy, mitigate error propagation, and generate comprehensive sentiment tuples.\n",
      "\n",
      "Subsection 3: Advanced Attention and Graph-Based Techniques\n",
      "Description 3: Explore advanced techniques such as sophisticated attention mechanisms and graph-based models, highlighting their ability to capture long-distance dependencies, model complex aspect-opinion relationships, and integrate multi-modal data for more nuanced sentiment analysis.\n",
      "\n",
      "Subsection 4: Learning and Adaptation Mechanisms\n",
      "Description 4: Discuss various learning and adaptation strategies including weakly supervised learning, prompt learning, domain adaptation, and ensemble learning, emphasizing their contributions to model robustness, reduction of spurious correlations, and improved performance across diverse domains and tasks.\n",
      "\n",
      "Subsection 5: Multi-Modal Aspect-Based Sentiment Analysis\n",
      "Description 5: Present a detailed analysis of multi-modal ABSA methods, focusing on the integration of text and image data, challenges in semantic alignment and modal gap, and innovative frameworks like DQPSA that address these challenges to achieve state-of-the-art performance.\n",
      "\n",
      "Subsection 6: Energy-Based Models and Span Prediction\n",
      "Description 6: Delve into the application of Energy-Based Models in ABSA, particularly in predicting span boundaries and enhancing the pairwise relevance between target spans, showcasing the effectiveness of these models in improving sentiment analysis accuracy.\n",
      "\n",
      "Subsection 7: Benchmarking and Performance Evaluation\n",
      "Description 7: Evaluate the performance of state-of-the-art ABSA methods on various benchmarks, comparing their results with existing approaches and highlighting the superior performance of novel frameworks like DQPSA against traditional and multi-modal large language models.\n",
      "</format>\n",
      "<format>\n",
      "Subsection 1: Data Augmentation Techniques\n",
      "Description 1: Discuss various data augmentation techniques employed in ABSA, such as explicit sentiment augmentations, cross-domain data augmentation, and the role of language models in generating diverse and fluent data.\n",
      "\n",
      "Subsection 2: Domain Adaptation Strategies\n",
      "Description 2: Explore unsupervised domain adaptation methods for ABSA, including pivot-based methods, auto-encoders, domain adversarial networks, and semi-supervised methods, focusing on learning domain-invariant representations and mitigating distribution discrepancies.\n",
      "\n",
      "Subsection 3: Domain-Adaptive Pseudo Labeling\n",
      "Description 3: Explain the process of assigning pseudo labels to unlabeled target-domain data using aspect-aware domain adaptation models, and the importance of minimizing domain discrepancy for improving pseudo-label quality.\n",
      "\n",
      "Subsection 4: Domain-Adaptive Language Modeling\n",
      "Description 4: Describe the design and training of a Domain-Adaptive Language Model (DALM) that integrates data generation and sequence labeling, capturing transferable context and annotation across domains.\n",
      "\n",
      "Subsection 5: Target-Domain Data Generation\n",
      "Description 5: Discuss the generation of labeled target-domain data using the trained DALM, employing probability-based generation strategies for creating diverse and fluent data with fine-grained annotations.\n",
      "\n",
      "Subsection 6: Evaluation and Analysis\n",
      "Description 6: Present the evaluation results of the proposed data augmentation and domain adaptation methods on ABSA and AE tasks, analyzing the performance, diversity, fluency, and data distribution of the generated data.\n",
      "\n",
      "Subsection 7: Limitations and Future Work\n",
      "Description 7: Discuss the limitations of the current approaches and propose potential future research directions, such as generating novel target-domain words and extending the methods to other information extraction tasks.\n",
      "\n",
      "Subsection 8: Ethical Considerations\n",
      "Description 8: Address the ethical concerns related to the generation of data and the potential for sensitive and misleading content, emphasizing the need for manual checking and responsible use of the generated data.\n",
      "\n",
      "</format>\n",
      "<format>\n",
      "Subsection 1: Explicit Sentiment Augmentation\n",
      "Description 1: Discuss explicit sentiment augmentation methods, such as counterfactual data generation and multi-pattern prompting, and their impact on mitigating spurious correlations and improving model robustness in ABSA.\n",
      "\n",
      "Subsection 2: Cross-Domain Data Augmentation\n",
      "Description 2: Explore cross-domain data augmentation techniques, including domain mapping and style transfer, and their effectiveness in addressing sensor bias and domain shift issues in ABSA.\n",
      "\n",
      "Subsection 3: Domain-Adaptive Language Modeling\n",
      "Description 3: Analyze domain-adaptive language modeling approaches, like active learning and density-aware selection, and their role in balancing discriminability and transferability for robust ABSA performance across different domains.\n",
      "\n",
      "Subsection 4: Multimodal Approaches for Data Augmentation\n",
      "Description 4: Examine multimodal data augmentation strategies, including text and image augmentation, and their potential to enhance model generalization and robustness in ABSA tasks.\n",
      "\n",
      "Subsection 5: Active Learning Strategies for Domain Adaptation\n",
      "Description 5: Discuss active learning techniques, such as region impurity and prediction uncertainty, and their application in efficiently selecting informative samples for domain adaptation in ABSA.\n",
      "\n",
      "Subsection 6: Adversarial Bayesian Augmentation\n",
      "Description 6: Explore adversarial Bayesian augmentation methods, including adversarial training and Bayesian neural networks, and their contribution to learning domain-invariant representations for ABSA.\n",
      "\n",
      "Subsection 7: Multisource Active Domain Transfer\n",
      "Description 7: Analyze multisource active domain transfer approaches, incorporating hypernetworks and evidential deep learning, and their effectiveness in measuring domain uncertainty and selecting valuable target samples for ABSA.\n",
      "\n",
      "Subsection 8: Bi-Syntax Aware Graph Attention Networks\n",
      "Description 8: Discuss the use of bi-syntax aware graph attention networks, leveraging constituent tree syntax information, to model sentiment-aware context and improve aspect-opinion alignment in ABSA.\n",
      "\n",
      "Subsection 9: Dynamic Density-Aware Active Domain Adaptation\n",
      "Description 9: Explore dynamic density-aware active domain adaptation techniques, focusing on adaptive budget allocation policies and density-aware selection methods for efficient domain adaptation in ABSA.\n",
      "\n",
      "Subsection 10: Adversarial Learning with Semantics Transformations\n",
      "Description 10: Analyze adversarial learning with semantics transformations, including standard data augmentations with learnable parameters, and their potential to improve model robustness against out-of-distribution domain shifts in ABSA.\n",
      "\n",
      "Subsection 11: Domain Adaptation in Multilingual Settings\n",
      "Description 11: Discuss domain adaptation approaches in multilingual and multi-domain monolingual settings, focusing on their application in complex word identification and cross-lingual transfers for ABSA.\n",
      "</format>\n",
      "<format>\n",
      "Subsection 1: Addressing Implicit Sentiment\n",
      "Description 1: Discuss the challenges of implicit sentiment in ABSA and explore methods for handling it, such as incorporating external knowledge, post-training strategies, and explicit sentiment augmentations.\n",
      "\n",
      "Subsection 2: Cross-Domain Adaptation and Data Augmentation\n",
      "Description 2: Analyze the limitations of supervised ABSA methods and the potential of unsupervised domain adaptation techniques, focusing on generating labeled data for emerging domains and improving model robustness.\n",
      "\n",
      "Subsection 3: Mitigating Spurious Correlations\n",
      "Description 3: Examine the issue of spurious correlations in ABSA and investigate approaches to reduce them, including counterfactual data augmentation and information bottleneck techniques.\n",
      "\n",
      "Subsection 4: Ethical Considerations and Responsible Practices\n",
      "Description 4: Highlight the ethical concerns surrounding sentiment analysis, emphasizing the importance of transparency, accountability, and fairness in developing and deploying ABSA models.\n",
      "\n",
      "Subsection 5: Future Directions and Emerging Trends\n",
      "Description 5: Discuss potential future directions for ABSA research, addressing limitations of current approaches, highlighting emerging trends in the field, and emphasizing the importance of ethical considerations in sentiment analysis tasks.\n",
      "</format>\n",
      "<format>\n",
      "Subsection 1: Cross-Modal Alignment and Pre-Training\n",
      "Description 1: Discuss the importance of cross-modal alignment in MABSA and the role of task-specific pre-training frameworks like VLPMABSA in capturing fine-grained aspects, opinions, and their alignments across modalities.\n",
      "\n",
      "Subsection 2: Advanced Modeling Techniques for MABSA\n",
      "Description 2: Explore advanced techniques such as span-level bidirectional cross-attention frameworks, energy-based models, and aspect-oriented opinion alignment networks in addressing challenges related to visual information focus, modal gap mitigation, and semantic mismatch in MABSA.\n",
      "\n",
      "Subsection 3: Knowledge Integration and Multi-View Learning\n",
      "Description 3: Analyze the integration of external knowledge and multi-view learning approaches in ABSA, focusing on the use of knowledge graphs, linguistic databases, and multi-view prompting to enhance sentiment analysis and improve aspect-opinion alignment.\n",
      "\n",
      "Subsection 4: Ethical Considerations and Bias Mitigation\n",
      "Description 4: Discuss ethical considerations in sentiment analysis, highlighting the importance of addressing bias, ensuring fairness, and promoting inclusivity in ABSA research and applications.\n",
      "\n",
      "Subsection 5: Open and Diverse Aspect-Based Summarization\n",
      "Description 5: Explore the development of open and diverse aspect-based summarization benchmarks like OpenAsp, which facilitate the extraction of ad-hoc aspects and the generation of aspect-based summaries from multi-document datasets.\n",
      "\n",
      "Subsection 6: Weak Supervision and Adaptation Strategies\n",
      "Description 6: Discuss strategies like weakly supervised learning and domain adaptation in ABSA, emphasizing their contributions to improving model robustness and performance across different domains and tasks.\n",
      "\n",
      "Subsection 7: Future Directions and Emerging Trends\n",
      "Description 7: Identify potential future directions for ABSA research, addressing limitations of current approaches, and highlighting emerging trends such as the integration of generative models, the exploration of new pre-training techniques, and the development of more comprehensive and inclusive benchmarks.\n",
      "</format>\n"
     ]
    }
   ],
   "source": [
    "# 生成二级提纲\n",
    "keywords = ['future', 'conclusion']\n",
    "sub_sections_list = []\n",
    "for index, (section_name, section_description) in enumerate(zip(survey_sections, survey_section_descriptions)):\n",
    "    if index == 0:\n",
    "        continue\n",
    "    query = f\"topic: {topic}\\nsection name: {section_name}\\nsection description: {section_description}.\"\n",
    "    simple_rag_result = do_rag_simple(query)\n",
    "    sub_outlines = []\n",
    "    subsection_rag_result = consolidate_rag_result(simple_rag_result, kind='simple')\n",
    "    for i in range(len(subsection_rag_result)):\n",
    "        prompt = __generate_prompt(SUBSECTION_OUTLINE_PROMPT,\n",
    "                                paras={'OVERALL OUTLINE': outline, 'SECTION NAME': section_name,\n",
    "                                        'SECTION DESCRIPTION': section_description, 'TOPIC': topic,\n",
    "                                        'PAPER LIST': subsection_rag_result[i]})\n",
    "        sub_outline = zhipu_api(prompt)\n",
    "        sub_outlines.append(sub_outline)\n",
    "        print(sub_outline)\n",
    "    prompt = __generate_prompt(MERGING_SUBSECTION_PROMPT, paras={'TOPIC': topic, 'SECTION_NAME': section_name, 'OUTLINE LIST': '\\n'.join(sub_outlines)})\n",
    "    sub_outline = zhipu_api(prompt)\n",
    "    sub_outline_copy = sub_outline.replace('<format>', '').replace('</format>', '')\n",
    "    sub_outline_list = sub_outline_copy.split(\"\\n\\n\")\n",
    "    if not any(x in section_name for x in keywords) and any([x in sub_outline_list[-1] for x in keywords]):\n",
    "        sub_outline = \"\\n\\n\".join(sub_outline_list[:-1])\n",
    "        sub_outline = \"<format>\\n\" + sub_outline + \"\\n</format>\"\n",
    "    sub_sections_list.append(sub_outline)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<format>\n",
      "\n",
      "Subsection 1: Evolution of ABSA Techniques\n",
      "Description 1: Trace the evolution from early handcrafted feature-based methods to neural network approaches, including CNNs, RNNs, and the introduction of attention mechanisms for capturing aspect-opinion relationships and long-distance dependencies.\n",
      "\n",
      "Subsection 2: Advanced Modeling and Syntactic Integration\n",
      "Description 2: Explore the integration of syntactic knowledge through graph neural networks, position information, and proximity strategies, highlighting their role in modeling complex sentence structures and improving sentiment analysis accuracy.\n",
      "\n",
      "Subsection 3: Pre-Trained Models and External Knowledge\n",
      "Description 3: Analyze the impact of pre-trained language models like BERT and RoBERTa on ABSA, and discuss the use of external knowledge from resources like knowledge graphs and linguistic databases to enhance sentiment understanding.\n",
      "</format>\n",
      "<format>\n",
      "Subsection 1: Integrated Context and Syntax Approaches\n",
      "Description 1: Examine the synergy of context-based and syntax-based methods in ABSA, discussing how neural networks like CNNs, LSTMs, and attention mechanisms capture contextual features, and how dependency trees and graph neural networks encode syntactic information to model aspect-opinion relationships effectively.\n",
      "\n",
      "Subsection 2: Knowledge-Enhanced and Generative Models\n",
      "Description 2: Analyze the integration of external knowledge and generative models in ABSA, focusing on the use of knowledge graphs, linguistic databases, and models like BERT and RoBERTa to enhance sentiment analysis, mitigate error propagation, and generate sentiment tuples.\n",
      "\n",
      "Subsection 3: Advanced Techniques and Learning Strategies\n",
      "Description 3: Delve into advanced techniques such as attention mechanisms, graph-based models, and multi-modal approaches, and discuss learning strategies like weakly supervised learning, prompt learning, domain adaptation, and ensemble learning, emphasizing their roles in improving model robustness and performance.\n",
      "\n",
      "Subsection 4: Benchmarking and Performance Evaluation\n",
      "Description 4: Evaluate the performance of state-of-the-art ABSA methods on various benchmarks, comparing their results with existing approaches and highlighting the superior performance of novel frameworks against traditional and multi-modal large language models.\n",
      "</format>\n",
      "<format>\n",
      "\n",
      "Subsection 1: Data Augmentation Techniques in ABSA\n",
      "Description 1: Discuss various data augmentation techniques such as explicit sentiment augmentations, cross-domain data augmentation, and the role of language models in generating diverse and fluent data, emphasizing their impact on model robustness and mitigation of spurious correlations.\n",
      "\n",
      "Subsection 2: Domain Adaptation Strategies for ABSA\n",
      "Description 2: Explore unsupervised domain adaptation methods including pivot-based methods, auto-encoders, domain adversarial networks, and semi-supervised methods, focusing on learning domain-invariant representations and mitigating distribution discrepancies to enhance ABSA performance across different domains.\n",
      "\n",
      "Subsection 3: Advanced Domain-Adaptive Techniques\n",
      "Description 3: Analyze advanced techniques such as domain-adaptive language modeling, multimodal approaches, and adversarial Bayesian augmentation, highlighting their contributions to balancing discriminability and transferability, and improving aspect-opinion alignment in ABSA.\n",
      "</format>\n",
      "<format>\n",
      "\n",
      "Subsection 1: Advanced Techniques and Cross-Modal Alignment in ABSA\n",
      "Description 1: Explore advanced modeling techniques such as span-level bidirectional cross-attention, energy-based models, and aspect-oriented opinion alignment networks, along with the importance of cross-modal alignment and task-specific pre-training frameworks to enhance fine-grained aspect and opinion detection across different modalities.\n",
      "\n",
      "Subsection 2: Knowledge Integration and Multi-View Learning\n",
      "Description 2: Analyze the integration of external knowledge sources like knowledge graphs and linguistic databases, and the application of multi-view learning and prompting techniques to improve aspect-opinion alignment and sentiment analysis accuracy.\n",
      "\n",
      "Subsection 3: Ethical Considerations and Bias Mitigation\n",
      "Description 3: Discuss the ethical implications of ABSA, emphasizing the need to address bias, ensure fairness, promote inclusivity, and adhere to responsible practices in the development and deployment of sentiment analysis models.\n",
      "</format>\n"
     ]
    }
   ],
   "source": [
    "# Show the merged subsection outline produced for each section.\n",
    "for outline_block in sub_sections_list:\n",
    "    print(outline_block)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "agent",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
