# -*- coding: utf-8 -*-
from globalvariables import *
from Agents.agent import Agent
from utils import *
import requests
import re
import time
import copy
import random
import asyncio
import aiohttp
from itertools import combinations

from collections import defaultdict
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity


class RetrievalAgent(Agent):
    """Agent that retrieves candidate papers from a remote paper-search
    service (by keyword, topic, title, paper id, abstract or text chunk)
    and filters/deduplicates them for survey-outline generation.

    All error paths of the set-returning coroutines now return ``set()``;
    previously they returned ``{}`` (an empty *dict*), which later broke
    ``set.intersection`` / ``set.union`` calls in :meth:`FirRetrieval`.
    """

    def __init__(self, token):
        super().__init__(token)
        # Endpoints of the third-party retrieval service.
        self.url_by_chunk = "http://180.184.65.98:38880/atomgit/search_papers"
        self.url_by_paper_id = "http://180.184.65.98:38880/atomgit/query_whole_text_by_id"
        # NOTE(review): the double slash below looks like a typo, but the
        # string is kept unchanged in case the remote route is registered
        # exactly this way — confirm against the service.
        self.url_by_paper_title = "http://180.184.65.98:38880/atomgit//query_whole_text_by_title"
        self.url_by_paper_title_chunk = 'http://180.184.65.98:38880/atomgit/query_by_title'
        self.url_by_paper_id_chunk = 'http://180.184.65.98:38880/atomgit/query_by_paper_id'
        self.url_by_title_contain = "http://180.184.65.98:38880/atomgit/query_by_title_contain"
        self.url_by_title_like = "http://180.184.65.98:38880/atomgit/query_by_title_like"
        self.url_by_metadata_title_contain = "http://180.184.65.98:38880/atomgit/query_paper_metadata_that_title_contain"
        self.url_by_metadata_title_like = "http://180.184.65.98:38880/atomgit/titles_like"
        self.num_paper_max = 20  # max number of retrieved papers used for outline generation
        self.num_paper_threshold = 8  # minimum acceptable number of papers
        self.chunked_papers = []
        self.paper_ids_set = set()
        # Bug fix: ThirdRetrieval()/AddPaper() read and extend this attribute,
        # but it was never initialised, raising AttributeError on first use.
        self.paper_ids_list = []
        self.paper_titles_set4outline = set()

    async def retrieve_papers_by_title_contain(self, word):
        """Return the set of paper ids whose title contains ``word``."""
        try:
            async with aiohttp.ClientSession() as session:
                params = {"title": word, "top_k": 5000}
                # Consistency fix: use the configured endpoint instead of a
                # duplicated hard-coded URL (same address).
                async with session.get(self.url_by_title_contain,
                                       params=params) as resp:
                    if resp.status == 200:
                        papers = await resp.json()
                        return {paper['paper_id'] for paper in papers}
                    print(f"Error: {resp.status} for word '{word}'")
                    return set()
        except Exception as e:
            print(f"Exception for word '{word}': {e}")
            return set()

    async def retrieve_paper_metadata_that_title_contain(self, word, top_k):
        """Return the set of paper *titles* whose title contains ``word``.

        The service returns rows where index 1 is the title (presumably
        ``(paper_id, paper_title, ...)`` — TODO confirm the row schema).
        """
        try:
            async with aiohttp.ClientSession() as session:
                params = {"title": word, "top_k": top_k}
                async with session.get(self.url_by_metadata_title_contain, params=params) as resp:
                    if resp.status == 200:
                        papers_metadata = await resp.json()
                        return {paper[1] for paper in papers_metadata}
                    print(f"Error: {resp.status} for word '{word}'")
                    return set()
        except Exception as e:
            print(f"Exception for word '{word}': {e}")
            return set()

    async def retrieve_papers_by_title_like(self, word):
        """Return the set of paper ids whose title fuzzily matches ``word``."""
        try:
            async with aiohttp.ClientSession() as session:
                params = {"title": word, "top_k": 25}
                async with session.get(self.url_by_title_like, params=params) as resp:
                    if resp.status == 200:
                        papers = await resp.json()
                        return {paper[0]['paper_id'] for paper in papers}
                    print(f"Error: {resp.status} for word '{word}'")
                    return set()
        except Exception as e:
            print(f"Exception for word '{word}': {e}")
            return set()

    async def retrieve_paper_metadata_by_title_like(self, topic, top_k):
        """Return the set of paper titles fuzzily matching ``topic``."""
        try:
            async with aiohttp.ClientSession() as session:
                params = {"title": topic, "top_k": top_k}
                async with session.get(self.url_by_metadata_title_like, params=params) as resp:
                    if resp.status == 200:
                        papers = await resp.json()
                        return set(papers)
                    print(f"Error: {resp.status} for topic '{topic}'")
                    return set()
        except Exception as e:
            print(f"Exception for topic '{topic}': {e}")
            return set()

    async def retrieve_papers_by_chunk(self, kw, url):
        """Return a list of paper ids whose chunks match the query ``kw``.

        Returns an empty list on any error.
        """
        try:
            async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=60)) as session:
                params = {"chunk": kw, "top_k": 200}
                async with session.get(url, params=params) as response:
                    if response.status == 200:
                        chunks = await response.json()
                        return [chunk['paper_id'] for chunk in chunks]
                    print(f"Error: {response.status} for kw '{kw}'")
                    return []
        except Exception as e:
            print(f"Exception for word '{kw}': {e}")
            return []

    async def retrieve_chunked_paper_by_paper_id(self, paper_id, url):
        """Return the paper title for ``paper_id``.

        NOTE(review): on success this returns a *string* (the title of the
        first returned chunk) but on failure an empty *list*; callers must
        tolerate both — kept as-is to avoid changing the caller contract.
        """
        try:
            async with aiohttp.ClientSession() as session:
                params = {"paper_id": paper_id, "top_k": 10}
                async with session.get(url, params=params) as response:
                    if response.status == 200:
                        paper = await response.json()
                        return paper[0]['paper_title']
                    print(f"Error: {response.status} for '{paper_id}'")
                    return []
        except Exception as e:
            print(f"Exception for id '{paper_id}': {e}")
            return []

    async def retrieve_chunked_paper_by_paper_title(self, paper_title, top_k):
        """Return up to ``top_k`` chunks of the paper titled ``paper_title``."""
        try:
            async with aiohttp.ClientSession() as session:
                params = {"title": paper_title, "top_k": top_k}
                async with session.get(self.url_by_paper_title_chunk, params=params) as response:
                    if response.status == 200:
                        chunked_paper = await response.json()
                        return chunked_paper
                    print(f"Error: {response.status} for '{paper_title}'")
                    return []
        except Exception as e:
            print(f"Exception for title '{paper_title}': {e}")
            return []

    async def retrieve_papertext_by_paper_title(self, url, paper_title):
        """Return the full text of the paper titled ``paper_title`` ('' on error)."""
        try:
            async with aiohttp.ClientSession() as session:
                params = {"title": paper_title}
                async with session.get(url, params=params) as response:
                    if response.status == 200:
                        paper_text = await response.json()
                        return paper_text
                    print(f"Error: {response.status} for paper: '{paper_title}'")
                    return ''
        except Exception as e:
            print(f"Exception for paper: '{paper_title}': {e}")
            return ''

    async def extract_abstract(self, text):
        """Extract the abstract section from a paper's markdown-like text.

        Strips inline ``$...$`` math first, then matches either a
        ``# Abstract`` heading or an inline ``Abstract.`` marker and returns
        the text up to the next blank-line gap / heading. Returns '' when no
        abstract is found.
        """
        text = re.sub(r'\$.*?\$', '', text)
        # Match content after "# Abstract" (letters possibly space-separated
        # by the PDF extractor) or after an inline "Abstract." marker.
        abstract_pattern_1 = re.compile(
            r'#\s*[Aa]\s*[Bb]\s*[Ss]\s*[Tt]\s*[Rr]\s*[Aa]\s*[Cc]\s*[Tt]\s*\n+([\s\S]+?)(?=\n{2,}|#\s|\Z)',
            re.DOTALL
        )
        abstract_pattern_2 = re.compile(
            r'(?<=\s)Abstract\.\s([\s\S]+?)(?=\n{2,}|#\s|\Z)',
            re.DOTALL
        )
        abstract = ''

        # Prefer the "# Abstract" heading form.
        match_1 = abstract_pattern_1.search(text)
        if match_1:
            abstract = match_1.group(1).strip()
        else:
            # Fall back to the "Abstract." inline form.
            match_2 = abstract_pattern_2.search(text)
            if match_2:
                abstract = match_2.group(1).strip()

        return abstract

    async def judge_relevance_by_text(self, paper_title, abstract, topic):
        """Ask the LLM whether (title, abstract) is relevant to ``topic``.

        Returns True when the response contains an affirmative token.
        """
        Relevance = False
        judge_relevance_input = judge_relevance_prompt % (paper_title, abstract, topic)
        resp = await self.chat_async(user_input=judge_relevance_input,
                                     response_format_type="text",
                                     language="English", do_sample=False)
        resp = resp.lower()
        # '是' is the Chinese affirmative ("yes").
        if 'yes' in resp or 'true' in resp or '是' in resp:
            Relevance = True
        return Relevance

    async def judge_relevance(self, paper_title, topic):
        """Judge topic relevance of a paper given only its title.

        Fetches the full text, extracts the abstract, then delegates to
        :meth:`judge_relevance_by_text`. Returns False when no abstract can
        be extracted.
        """
        # TODO: split into two stages so chunk-level relevance judging can be
        # reused during per-section retrieval (a section topic may match a
        # single chunk rather than a whole paper).
        paper_whole_text = await self.retrieve_papertext_by_paper_title(self.url_by_paper_title, paper_title)
        abstract = await self.extract_abstract(paper_whole_text)
        Relevance = False
        if abstract:  # only judge when an abstract was found
            Relevance = await self.judge_relevance_by_text(paper_title, abstract, topic)
        return Relevance

    async def FirRetrieval(self, topic_query_1, topic_query_2, kw_queries):
        """First-stage retrieval: keyword hard-match plus fuzzy topic match.

        Returns ``(chosen, un_chosen)`` — the titles selected for outline
        generation and the remaining retrieved titles.
        """
        async def keywords_retrieval(kw_queries):
            # Deduplicate keywords across all queries.
            keywords = set()
            for kw_query in kw_queries:
                for kw in kw_query:
                    keywords.add(kw)
            keywords = list(keywords)
            # Retrieve title sets for each keyword concurrently.
            tasks = [self.retrieve_paper_metadata_that_title_contain(keyword, top_k=4000) for keyword in keywords]
            ret_results = await asyncio.gather(*tasks)
            # Map keyword -> set of matching titles.
            paper_dict = {keyword: ret_result for keyword, ret_result in zip(keywords, ret_results)}
            paper_titles_caps = []
            # Intersection per query (titles containing ALL keywords of the query).
            for kw_query in kw_queries:
                paper_sets = [paper_dict[key] for key in kw_query if key in paper_dict]
                if paper_sets:
                    paper_titles_caps.append(set.intersection(*paper_sets))
                else:
                    paper_titles_caps.append(set())
            # Union across queries.
            if paper_titles_caps:
                paper_titles_cup = set.union(*paper_titles_caps)
            else:
                paper_titles_cup = set()
            return paper_titles_cup

        async def topics_retrieval(topic_query_1, topic_query_2):
            async def distill_papers(paper_titles_topic, topic):
                # Keep only titles the LLM judges relevant to the topic.
                distilled_paper_titles_topic = set()
                paper_titles_topic_list = list(paper_titles_topic)
                tasks = [self.judge_relevance(paper_title, topic) for paper_title in paper_titles_topic_list]
                judge_results = await asyncio.gather(*tasks)
                for index, judge_result in enumerate(judge_results):
                    if judge_result:
                        distilled_paper_titles_topic.add(paper_titles_topic_list[index])
                return distilled_paper_titles_topic

            # Fuzzy-match both topic phrasings concurrently.
            tasks = [self.retrieve_paper_metadata_by_title_like(topic_query_1, top_k=50),
                     self.retrieve_paper_metadata_by_title_like(topic_query_2, top_k=50)]
            ret_results = await asyncio.gather(*tasks)
            if ret_results:
                # Intersect, then distill by LLM relevance.
                paper_titles_cap = set.intersection(*ret_results)
                distilled_paper_titles_cap = await distill_papers(paper_titles_cap, topic_query_1)
            else:
                return set()
            return distilled_paper_titles_cap

        # Run both retrieval strategies concurrently.
        paper_titles_kw, paper_titles_topic = await asyncio.gather(
            *[keywords_retrieval(kw_queries), topics_retrieval(topic_query_1, topic_query_2)])
        cap = paper_titles_kw.intersection(paper_titles_topic)
        cup = paper_titles_kw.union(paper_titles_topic)
        # Preference order: intersection (most likely fully relevant) >
        # keyword hard-match > union of both strategies.
        if len(cap) > self.num_paper_max:
            paper_titles_set4outline = set(random.sample(list(cap), self.num_paper_max))
        elif len(cap) >= self.num_paper_threshold:
            paper_titles_set4outline = cap
        elif len(paper_titles_kw) > self.num_paper_max:
            paper_titles_set4outline = set(random.sample(list(paper_titles_kw), self.num_paper_max))
        elif len(paper_titles_kw) >= self.num_paper_threshold:
            paper_titles_set4outline = paper_titles_kw
        elif len(cup) > self.num_paper_max:
            # Union exceeds the cap: down-sample randomly.
            paper_titles_set4outline = set(random.sample(list(cup), self.num_paper_max))
        else:
            # Union fits under the cap: take it all, even below the threshold.
            # TODO: when below the threshold, fall back to abstract-based
            # retrieval seeded from the papers found so far.
            paper_titles_set4outline = cup
        # Titles retrieved but not chosen for outline generation.
        paper_titles_set_un_chosen4outline = cup - paper_titles_set4outline
        return paper_titles_set4outline, paper_titles_set_un_chosen4outline

    async def SecRetrieval(self, sec_ret_target_num, paper_titles_set4outline, paper_titles_set_un_chosen4outline,
                           semaphores):
        """Second-stage retrieval: expand the paper pool via abstract similarity.

        For each chosen paper, its abstract is used as a semantic query; new
        (previously unseen) titles are collected round-robin until
        ``sec_ret_target_num`` is reached.

        ``semaphores`` is currently unused — kept for caller compatibility.
        """
        new_papers_Retrieval = set()  # newly discovered paper titles

        async def fetch(session, url, params):
            async with session.get(url, params=params) as response:
                return await response.json()

        async def retrieve_papers_by_abstract(abstract):
            # Semantic search: abstract text -> similar papers' titles.
            paper_titles = []
            async with aiohttp.ClientSession() as session:
                try:
                    params = {"query": abstract, "top_k": 50}
                    response_data = await fetch(session, self.url_by_chunk, params)
                    for data in response_data:
                        paper_title = data["entity"]["paper_title"]
                        paper_titles.append(paper_title)
                    return paper_titles
                except Exception as e:
                    print(f"Error in retrieve_papers_by_abstract: {e}")
                    return []

        async def process_single_paper(title):
            """Retrieve papers related to ``title`` via its abstract."""
            chunked_paper = await self.retrieve_chunked_paper_by_paper_title(title, top_k=1)
            # Type-consistency fix: failure paths previously returned set()
            # while success returned a list; always return a list.
            if not chunked_paper:
                return []
            abstract = await self.extract_abstract(chunked_paper[0]["chunk_text"])
            if not abstract:
                return []
            related_papers = await retrieve_papers_by_abstract(abstract)
            return related_papers

        # Concurrently fetch related papers for every chosen paper.
        tasks = [process_single_paper(title) for title in paper_titles_set4outline]
        all_related_papers = await asyncio.gather(*tasks)
        # Bug fix: min() over an empty sequence raised ValueError when no
        # papers were chosen / nothing was retrieved; default to 0.
        min_len = min((len(sublist) for sublist in all_related_papers), default=0)
        print(f"最小长度{min_len}\n")
        # Round-robin over the result lists (rank 0 of every list first, then
        # rank 1, ...) so the most similar papers are preferred.
        for index in range(min_len):
            for related_papers in all_related_papers:
                paper = related_papers[index]
                if paper not in paper_titles_set4outline and paper not in paper_titles_set_un_chosen4outline:
                    new_papers_Retrieval.add(paper)
                if len(new_papers_Retrieval) >= sec_ret_target_num:
                    break
            if len(new_papers_Retrieval) >= sec_ret_target_num:
                break
        return new_papers_Retrieval

    async def ThirdRetrieval(self):
        """Third-stage retrieval, used when the first two stages found too few
        papers: repeatedly expand ``self.paper_ids_list`` by abstract-based
        search seeded from pairs of already-found paper ids.
        """
        # TODO: fully asynchronous pair generation.
        pairs = list(combinations(self.paper_ids_list, 2))  # all id pairs
        async with aiohttp.ClientSession() as session:
            while len(self.paper_ids_list) < self.num_paper_threshold:
                # Respect the global paper cap.
                available_slots = self.num_paper_max - len(self.paper_ids_list)
                if available_slots <= 0:
                    print("Reached max paper limit.")
                    break
                if not pairs:
                    print("No more pairs available for retrieval.")
                    break
                # Process two pairs per round; only the first id of each pair
                # is used as the search seed.
                tasks = [self.Search_paper_by_abstract(session, pair[0]) for pair in pairs[:2]]
                results = await asyncio.gather(*tasks)
                # Collect newly retrieved ids.
                new_paper_ids_set = set()
                for result in results:
                    new_paper_ids_set.update(result)
                # Extend without duplicating existing ids.
                self.paper_ids_list.extend(new_paper_ids_set - set(self.paper_ids_list))
                # Drop the two pairs just processed.
                pairs = pairs[2:]

        return self.paper_ids_list

    async def Search_paper_by_abstract(self, session, paper_id):
        """Find paper ids similar to the abstract of the paper ``paper_id``.

        The abstract is assumed to live in the chunk with ``chunk_id == 0``.
        Returns a list of paper ids (possibly empty).
        """
        text = ''  # chunk_text of the seed paper
        abstract = ''  # extracted abstract of the seed paper
        paperlist_2_id = []

        async def fetch(session, url, params):
            async with session.get(url, params=params) as response:
                return await response.json()

        # TODO: the fetch response structure has changed — re-verify fields.
        datas = await fetch(session, url=self.url_by_paper_id, params={'paper_id': paper_id, "top_k": 10})
        for data in datas:
            # The abstract lives in the chunk with chunk_id 0.
            if data['chunk_id'] == 0:
                text = data['chunk_text']
                break
        match = re.search(r"# Abstract\n(.*?)(?:\n#|$)", text, re.DOTALL)
        if match:
            abstract = match.group(1).strip()

        datas_1 = await fetch(session, self.url_by_chunk, params={"query": abstract, "top_k": 50})
        for data in datas_1:
            paperid = data["entity"]["paper_id"]
            paperlist_2_id.append(paperid)
        return paperlist_2_id

    def BaseRetrieval(self, url: str, params: dict):
        """Generic synchronous GET against the third-party API.

        Returns the decoded JSON on success, otherwise an empty list so
        callers can safely iterate the result.
        """
        try:
            response = requests.get(url, params=params)
            if response.status_code == 200:
                return response.json()
            print(f"请求失败，状态码: {response.status_code}, 响应: {response.text}")
            return []
        except Exception as e:
            print(f"请求过程中发生错误: {e}")
            return []

    def AddPaper(self, paper_ids):
        """Merge ``paper_ids`` into ``self.paper_ids_list`` up to the cap.

        Returns True when the list has reached ``num_paper_threshold``.
        """
        cur_paper_ids = self.paper_ids_list
        # How many of the candidates are actually new.
        num_new_paper = len(set(paper_ids) - set(cur_paper_ids))
        if num_new_paper + len(cur_paper_ids) <= self.num_paper_max:
            # Everything fits under the cap: accept all.
            self.paper_ids_list = list(set(paper_ids) | set(cur_paper_ids))
        else:
            # Otherwise accept a random subset that exactly fills the cap.
            sample_num = self.num_paper_max - len(cur_paper_ids)
            # Bug fix: random.sample() no longer accepts a set (removed in
            # Python 3.11); convert to a list first.
            sampled_paper_ids = random.sample(list(set(paper_ids) - set(cur_paper_ids)), sample_num)
            self.paper_ids_list = cur_paper_ids + sampled_paper_ids
        return len(self.paper_ids_list) >= self.num_paper_threshold

    async def GetPaperContent_by_title(self, paper_titles_set):
        """Fetch up to 15 chunks per title and return them as one flat list."""
        paper_titles_list = list(paper_titles_set)
        tasks = [self.retrieve_chunked_paper_by_paper_title(paper_title=paper_title, top_k=15)
                 for paper_title in paper_titles_list]
        chunked_papers = await asyncio.gather(*tasks)
        chunks = [element for sub_list in chunked_papers for element in sub_list]
        return chunks

    async def retrival_reference_based_on_description(self, outline, topic):
        """Retrieve reference chunks for every leaf of ``outline`` and return
        the deduplicated union of all retrieved chunks."""
        # One task per leaf node, executed concurrently.
        tasks = [self.ret_ref_for_leaf(leafnode, topic) for leafnode in outline.getSubTreeLeaves()]
        await asyncio.gather(*tasks)
        # Deduplicate by chunk_ext_id. Perf fix: maintain a seen-id set
        # instead of rebuilding a set comprehension per chunk (was O(n^2)).
        reference_chunks_c = []
        seen_chunk_ids = set()
        for leafnode in outline.getSubTreeLeaves():
            for ref_chunk in leafnode.data['reference']:
                if ref_chunk['chunk_ext_id'] not in seen_chunk_ids:
                    seen_chunk_ids.add(ref_chunk['chunk_ext_id'])
                    reference_chunks_c.append(ref_chunk)
        return reference_chunks_c

    async def ret_ref_for_leaf(self, leafnode, topic):
        """Retrieve reference chunks for one leaf node based on its description
        and store them in ``leafnode.data['reference']``."""
        description = leafnode.data['description']
        query = f"{description}"
        chunks = self.BaseRetrieval(url=self.url_by_chunk, params={"query": query, "top_k": 10})
        reference_chunks = []
        paper_ids = []
        for chunk in chunks:
            if len(paper_ids) >= 3 and chunk['distance'] < 0.6:
                # Enough distinct papers found and similarity dropped below
                # the threshold: stop collecting.
                break
            if chunk['entity']['paper_id'] not in paper_ids:
                paper_ids.append(chunk['entity']['paper_id'])
            name, year = ext_name_year(chunk['entity']['original_filename'])
            ref_chunk = {'chunk_content': chunk['entity']['chunk_text'],
                         'paper_title': chunk['entity']['paper_title'],
                         'source_name': name,
                         'year': year,
                         'chunk_id': chunk['entity']['chunk_id'],
                         'chunk_ext_id': chunk['id']}
            reference_chunks.append(ref_chunk)
        leafnode.data['reference'] = reference_chunks


if __name__ == '__main__':
    # Ad-hoc manual check of RetrievalAgent.judge_relevance_by_text().
    # Removed: two dead leftover functions (taking `self`) that were defined
    # inside this guard and never called, plus old commented-out code.
    ret = RetrievalAgent('d185f605f1b547348750185c108bd63e.pMd0SEPulFz3XvqF')
    kw_queries = [['Multimodal', 'Large', 'Model'], ['Large', 'Multimodal', 'AI'], ['Holistic', 'Multimodal']]
    topic_query_1 = '多模态大模型'
    topic_query_2 = '**Multimodal Large Models**'
    # asyncio.run(ret.FirRetrieval(topic_query_1, topic_query_2, kw_queries))
    # Renamed from `abs`, which shadowed the builtin.
    abstract = """The role of data in building AI systems has recently been emphasized by the emerging concept of data-centric AI. Unfortunately, in the real-world, datasets may contain dirty samples, such as poisoned samples from backdoor attack, noisy labels in crowdsourcing, and even hybrids of them. The presence of such dirty samples makes the DNNs vunerable and unreliable. Hence, it is critical to detect dirty samples to improve the quality and realiability of dataset. Existing detectors only focus on detecting poisoned samples or noisy labels, that are often prone to weak generalization when dealing with dirty samples from other domains. In this paper, we find a commonality of various dirty samples is visual-linguistic inconsistency between images and associated labels. To capture the semantic inconsistency between modalities, we propose versatile data cleanser (VDC) leveraging the surpassing capabilities of multimodal large language models (MLLM) in cross-modal alignment and reasoning. It consists of three consecutive modules: the visual question generation module to generate insightful questions about the image; the visual question answering module to acquire the semantics of the visual content by answering the questions with MLLM; followed by the visual answer evaluation module to evaluate the inconsistency. Extensive experiments demonstrate its superior performance and generalization to various categories and types of dirty samples."""
    topic = "Large Language Model"
    title = "VDC: V ERSATILE DATA CLEANSER FOR DETECTING DIRTY SAMPLES VIA VISUAL -L INGUISTIC I NCONSIS -TENCY"
    # Fix: asyncio.get_event_loop() is deprecated for this pattern since
    # Python 3.10; asyncio.run() creates and closes a fresh event loop.
    print(asyncio.run(ret.judge_relevance_by_text(title, abstract, topic)))