import configparser
import os
import re
import time
from pymysql.cursors import DictCursor
import pymysql
import yaml
from langchain_community.tools.google_scholar import GoogleScholarQueryRun
from langchain_community.utilities.google_scholar import GoogleScholarAPIWrapper
import requests
from openai import OpenAI
from serpapi import GoogleSearch



# title = "An Analysis of Recent Advances in Deepfake Image Detection in an Evolving Threat Landscape."

# tool = GoogleScholarQueryRun(api_wrapper=GoogleScholarAPIWrapper(serp_api_key=api_key))
# Title: An analysis of recent advances in deepfake image detection in an evolving threat landscape
# Authors: SM Abdullah,A Cheruvu,S Kanchi
# Summary: SM Abdullah, A Cheruvu, S Kanchi… - … IEEE Symposium on …, 2024 - ieeexplore.ieee.org
# Total-Citations: 15
# result = tool.run(title,)
# print(result)



# res = requests.get("https://serpapi.com/search.json", params=params)
# data = res.json()
#
# # 输出第一个结果的摘要片段（snippet）
# print(data["organic_results"][0]["snippet"])
# print("\n\n")
# print(data)
# Deepfake or synthetic images produced using deep generative models pose serious risks to online platforms. This has triggered several research efforts to accurately detect deepfake images, achieving excellent performance on publicly available deepfake datasets. In this work, we study 8 state-of-the-art detectors and argue that they are far from being ready for deployment due to two recent developments. First, the emergence of lightweight methods to customize large generative models, can enable an attacker to create many customized …
#
#
#
# {'search_metadata': {'id': '67ee3439226705b3e65dffc7', 'status': 'Success', 'json_endpoint': 'https://serpapi.com/searches/e6d5109077de7652/67ee3439226705b3e65dffc7.json', 'created_at': '2025-04-03 07:09:45 UTC', 'processed_at': '2025-04-03 07:09:45 UTC', 'google_scholar_url': 'https://scholar.google.com/scholar?q=An+Analysis+of+Recent+Advances+in+Deepfake+Image+Detection+in+an+Evolving+Threat+Landscape.&hl=en', 'raw_html_file': 'https://serpapi.com/searches/e6d5109077de7652/67ee3439226705b3e65dffc7.html', 'total_time_taken': 0.55}, 'search_parameters': {'engine': 'google_scholar', 'q': 'An Analysis of Recent Advances in Deepfake Image Detection in an Evolving Threat Landscape.', 'hl': 'en'}, 'search_information': {'organic_results_state': 'Results for exact spelling', 'query_displayed': 'An Analysis of Recent Advances in Deepfake Image Detection in an Evolving Threat Landscape.'}, 'profiles': {'link': 'https://scholar.google.com/scholar?lookup=0&q=An+Analysis+of+Recent+Advances+in+Deepfake+Image+Detection+in+an+Evolving+Threat+Landscape.&hl=en&as_sdt=0,22', 'serpapi_link': 'https://serpapi.com/search.json?engine=google_scholar_profiles&hl=en&mauthors=An+Analysis+of+Recent+Advances+in+Deepfake+Image+Detection+in+an+Evolving+Threat+Landscape.'}, 'organic_results': [{'position': 1, 'title': 'An analysis of recent advances in deepfake image detection in an evolving threat landscape', 'result_id': 'ENtMLQvHs84J', 'link': 'https://ieeexplore.ieee.org/abstract/document/10646853/', 'snippet': 'Deepfake or synthetic images produced using deep generative models pose serious risks to online platforms. This has triggered several research efforts to accurately detect deepfake images, achieving excellent performance on publicly available deepfake datasets. In this work, we study 8 state-of-the-art detectors and argue that they are far from being ready for deployment due to two recent developments. 
First, the emergence of lightweight methods to customize large generative models, can enable an attacker to create many customized …', 'publication_info': {'summary': 'SM Abdullah, A Cheruvu, S Kanchi… - … IEEE Symposium on …, 2024 - ieeexplore.ieee.org', 'authors': [{'name': 'SM Abdullah', 'link': 'https://scholar.google.com/citations?user=YecMLaAAAAAJ&hl=en&oi=sra', 'serpapi_scholar_link': 'https://serpapi.com/search.json?author_id=YecMLaAAAAAJ&engine=google_scholar_author&hl=en', 'author_id': 'YecMLaAAAAAJ'}, {'name': 'A Cheruvu', 'link': 'https://scholar.google.com/citations?user=nfn3FFQVva4C&hl=en&oi=sra', 'serpapi_scholar_link': 'https://serpapi.com/search.json?author_id=nfn3FFQVva4C&engine=google_scholar_author&hl=en', 'author_id': 'nfn3FFQVva4C'}, {'name': 'S Kanchi', 'link': 'https://scholar.google.com/citations?user=VsdYF68AAAAJ&hl=en&oi=sra', 'serpapi_scholar_link': 'https://serpapi.com/search.json?author_id=VsdYF68AAAAJ&engine=google_scholar_author&hl=en', 'author_id': 'VsdYF68AAAAJ'}]}, 'resources': [{'title': 'arxiv.org', 'file_format': 'PDF', 'link': 'https://arxiv.org/pdf/2404.16212'}], 'inline_links': {'serpapi_cite_link': 'https://serpapi.com/search.json?engine=google_scholar_cite&hl=en&q=ENtMLQvHs84J', 'cited_by': {'total': 15, 'link': 'https://scholar.google.com/scholar?cites=14894467243462941456&as_sdt=40000005&sciodt=0,22&hl=en', 'cites_id': '14894467243462941456', 'serpapi_scholar_link': 'https://serpapi.com/search.json?as_sdt=40000005&cites=14894467243462941456&engine=google_scholar&hl=en'}, 'related_pages_link': 'https://scholar.google.com/scholar?q=related:ENtMLQvHs84J:scholar.google.com/&scioq=An+Analysis+of+Recent+Advances+in+Deepfake+Image+Detection+in+an+Evolving+Threat+Landscape.&hl=en&as_sdt=0,22', 'serpapi_related_pages_link': 'https://serpapi.com/search.json?as_sdt=0%2C22&engine=google_scholar&hl=en&q=related%3AENtMLQvHs84J%3Ascholar.google.com%2F', 'versions': {'total': 6, 'link': 
'https://scholar.google.com/scholar?cluster=14894467243462941456&hl=en&as_sdt=0,22', 'cluster_id': '14894467243462941456', 'serpapi_scholar_link': 'https://serpapi.com/search.json?as_sdt=0%2C22&cluster=14894467243462941456&engine=google_scholar&hl=en'}}}, {'position': 2, 'title': 'An Analysis of Recent Advances in Deepfake Image Detection in an Evolving Threat Landscape, arxiv', 'result_id': 'Vew1UceuHAMJ', 'type': 'Citation', 'snippet': '', 'publication_info': {'summary': 'SM Abdullah - 2024'}, 'inline_links': {'serpapi_cite_link': 'https://serpapi.com/search.json?engine=google_scholar_cite&hl=en&q=Vew1UceuHAMJ', 'cited_by': {'total': 3, 'link': 'https://scholar.google.com/scholar?cites=224246252545895509&as_sdt=40000005&sciodt=0,22&hl=en', 'cites_id': '224246252545895509', 'serpapi_scholar_link': 'https://serpapi.com/search.json?as_sdt=40000005&cites=224246252545895509&engine=google_scholar&hl=en'}, 'related_pages_link': 'https://scholar.google.com/scholar?q=related:Vew1UceuHAMJ:scholar.google.com/&scioq=An+Analysis+of+Recent+Advances+in+Deepfake+Image+Detection+in+an+Evolving+Threat+Landscape.&hl=en&as_sdt=0,22', 'serpapi_related_pages_link': 'https://serpapi.com/search.json?as_sdt=0%2C22&engine=google_scholar&hl=en&q=related%3AVew1UceuHAMJ%3Ascholar.google.com%2F'}}]}
#
# search = GoogleSearch(params)
# results = search.get_dict()
# organic_results = results["organic_results"]
# print(organic_results)
# organic_results[0]['link']
# organic_results[0]['snippet']
# [{'position': 1, 'title': 'An analysis of recent advances in deepfake image detection in an evolving threat landscape', 'result_id': 'ENtMLQvHs84J', 'link': 'https://ieeexplore.ieee.org/abstract/document/10646853/', 'snippet': 'Deepfake or synthetic images produced using deep generative models pose serious risks to online platforms. This has triggered several research efforts to accurately detect deepfake images, achieving excellent performance on publicly available deepfake datasets. In this work, we study 8 state-of-the-art detectors and argue that they are far from being ready for deployment due to two recent developments. First, the emergence of lightweight methods to customize large generative models, can enable an attacker to create many customized …', 'publication_info': {'summary': 'SM Abdullah, A Cheruvu, S Kanchi… - … IEEE Symposium on …, 2024 - ieeexplore.ieee.org', 'authors': [{'name': 'SM Abdullah', 'link': 'https://scholar.google.com/citations?user=YecMLaAAAAAJ&hl=en&oi=sra', 'serpapi_scholar_link': 'https://serpapi.com/search.json?author_id=YecMLaAAAAAJ&engine=google_scholar_author&hl=en', 'author_id': 'YecMLaAAAAAJ'}, {'name': 'A Cheruvu', 'link': 'https://scholar.google.com/citations?user=nfn3FFQVva4C&hl=en&oi=sra', 'serpapi_scholar_link': 'https://serpapi.com/search.json?author_id=nfn3FFQVva4C&engine=google_scholar_author&hl=en', 'author_id': 'nfn3FFQVva4C'}, {'name': 'S Kanchi', 'link': 'https://scholar.google.com/citations?user=VsdYF68AAAAJ&hl=en&oi=sra', 'serpapi_scholar_link': 'https://serpapi.com/search.json?author_id=VsdYF68AAAAJ&engine=google_scholar_author&hl=en', 'author_id': 'VsdYF68AAAAJ'}]}, 'resources': [{'title': 'arxiv.org', 'file_format': 'PDF', 'link': 'https://arxiv.org/pdf/2404.16212'}], 'inline_links': {'serpapi_cite_link': 'https://serpapi.com/search.json?engine=google_scholar_cite&hl=en&q=ENtMLQvHs84J', 'cited_by': {'total': 15, 'link': 
'https://scholar.google.com/scholar?cites=14894467243462941456&as_sdt=40000005&sciodt=0,22&hl=en', 'cites_id': '14894467243462941456', 'serpapi_scholar_link': 'https://serpapi.com/search.json?as_sdt=40000005&cites=14894467243462941456&engine=google_scholar&hl=en'}, 'related_pages_link': 'https://scholar.google.com/scholar?q=related:ENtMLQvHs84J:scholar.google.com/&scioq=An+Analysis+of+Recent+Advances+in+Deepfake+Image+Detection+in+an+Evolving+Threat+Landscape.&hl=en&as_sdt=0,22', 'serpapi_related_pages_link': 'https://serpapi.com/search.json?as_sdt=0%2C22&engine=google_scholar&hl=en&q=related%3AENtMLQvHs84J%3Ascholar.google.com%2F', 'versions': {'total': 6, 'link': 'https://scholar.google.com/scholar?cluster=14894467243462941456&hl=en&as_sdt=0,22', 'cluster_id': '14894467243462941456', 'serpapi_scholar_link': 'https://serpapi.com/search.json?as_sdt=0%2C22&cluster=14894467243462941456&engine=google_scholar&hl=en'}}}, {'position': 2, 'title': 'An Analysis of Recent Advances in Deepfake Image Detection in an Evolving Threat Landscape, arxiv', 'result_id': 'Vew1UceuHAMJ', 'type': 'Citation', 'snippet': '', 'publication_info': {'summary': 'SM Abdullah - 2024'}, 'inline_links': {'serpapi_cite_link': 'https://serpapi.com/search.json?engine=google_scholar_cite&hl=en&q=Vew1UceuHAMJ', 'cited_by': {'total': 3, 'link': 'https://scholar.google.com/scholar?cites=224246252545895509&as_sdt=40000005&sciodt=0,22&hl=en', 'cites_id': '224246252545895509', 'serpapi_scholar_link': 'https://serpapi.com/search.json?as_sdt=40000005&cites=224246252545895509&engine=google_scholar&hl=en'}, 'related_pages_link': 'https://scholar.google.com/scholar?q=related:Vew1UceuHAMJ:scholar.google.com/&scioq=An+Analysis+of+Recent+Advances+in+Deepfake+Image+Detection+in+an+Evolving+Threat+Landscape.&hl=en&as_sdt=0,22', 'serpapi_related_pages_link': 'https://serpapi.com/search.json?as_sdt=0%2C22&engine=google_scholar&hl=en&q=related%3AVew1UceuHAMJ%3Ascholar.google.com%2F'}}]

import requests
import logging

from black_list import Blacklist

# Round-robin cursor into the configured list of SerpAPI keys; advanced by
# get_paper_abstract_v2 each time a key is handed out for a request.
serpapi_api_key_index = 0
# api_key = os.environ.get('serpapi_api_key', 'dccb501df33da7b3d2188a0fad20317c13160d31443c865a55adcd580151f896')
def get_paper_abstract(title, serpapi_conf):
    """Fetch the abstract of a paper identified by its title.

    Thin wrapper kept for backward compatibility with existing callers;
    the actual lookup lives in ``get_paper_abstract_v2`` (Semantic
    Scholar first, SerpAPI / Google Scholar as a fallback).

    Args:
        title: Paper title used as the search query.
        serpapi_conf: Optional dict with an ``api-key`` entry (a str or a
            list of str) used for the SerpAPI fallback; may be ``None``.

    Returns:
        The abstract text, or ``"No abstract available"`` when no usable
        abstract could be retrieved from any source.
    """
    return get_paper_abstract_v2(title, serpapi_conf)

def get_paper_abstract_v2(title, serpapi_conf):
    """Fetch a paper abstract, trying Semantic Scholar then SerpAPI.

    Strategy:
      1. Query the Semantic Scholar Graph API (up to ``max_retries``
         attempts) and take the abstract of the first match.
      2. If that yields nothing usable (< 10 chars), fall back to a
         SerpAPI Google Scholar search and use the first organic result's
         snippet. API keys are drawn from a persistent blacklist-aware
         rotation; keys that hit the 429 rate limit are blacklisted.
      3. If both sources fail, return ``"No abstract available"``.

    Args:
        title: Paper title used as the search query.
        serpapi_conf: Optional dict with an ``api-key`` entry (str or
            list of str); when ``None``, the SerpAPI request is sent
            without a key.

    Returns:
        The abstract/snippet text, or ``"No abstract available"``.
    """
    url = "https://api.semanticscholar.org/graph/v1/paper/search"
    params = {
        "query": title,
        "fields": "title,abstract,authors,year,externalIds"
    }
    abstract_result = ""
    max_retries = 3
    # Try Semantic Scholar first
    for attempt in range(max_retries):
        try:
            response = requests.get(url, params=params, timeout=10)
            if response.status_code == 200:
                data = response.json()
                if data.get("data"):
                    paper = data["data"][0]
                    # The "abstract" key may be present with a null value;
                    # "or ''" normalizes that to an empty string.
                    abstract_result = paper.get('abstract') or ''
                    logging.info(f"{title} abstract from Semantic Scholar success. abstract = {abstract_result}")
                    break
                else:
                    logging.warning(f"{title}: No paper found on Semantic Scholar.")
                    break
            else:
                logging.warning(
                    f"{title}: Semantic Scholar request failed. Status {response.status_code}, attempt {attempt + 1}")
        except requests.RequestException as e:
            logging.warning(f"{title}: Semantic Scholar request exception: {e}, attempt {attempt + 1}")
        time.sleep(1)
    # If failed or too short, try SerpAPI fallback
    if not abstract_result or len(abstract_result) < 10:
        serpapi_url = "https://serpapi.com/search.json"
        params = {
            "engine": "google_scholar",
            "q": title
        }
        file_name = 'serpapi_api_key_blacklist.json'
        blacklist = Blacklist.load_blacklist(file_name, block_time="1M")
        for attempt in range(max_retries):
            unblock_list = []
            if serpapi_conf is not None:
                api_key = serpapi_conf.get('api-key', '')
                # Normalize a single key to a one-element list.
                if isinstance(api_key, str):
                    api_key = [api_key]
                if isinstance(api_key, list):
                    unblock_list = blacklist.get_unblocked_items(api_key)
                if len(unblock_list) > 0:
                    global serpapi_api_key_index
                    params["api_key"] = blacklist.get_next_unblocked_item(unblock_list)
                    serpapi_api_key_index = (serpapi_api_key_index + 1) % len(unblock_list)
                else:
                    logging.warning("no serpapi api key available, skip SerpAPI search.")
                    break
            try:
                res = requests.get(serpapi_url, params=params, timeout=10)
                if res.status_code == 200:
                    data = res.json()
                    if "organic_results" in data and len(data["organic_results"]) > 0:
                        abstract_result = data["organic_results"][0].get("snippet", "")
                        logging.info(f"Abstract found via SerpAPI: {title} ")
                    else:
                        logging.warning(f"{title}: No abstract snippet in SerpAPI response.")
                    break
                elif res.status_code == 429:
                    logging.warning(f"{title}: SerpAPI rate limit exceeded. Status {res.status_code}, attempt {attempt + 1}")
                    logging.debug(f"res --> {res}")
                    # Bug fix: when serpapi_conf is None no key was ever put
                    # into params, so params["api_key"] would raise KeyError.
                    blocked_key = params.get("api_key")
                    if blocked_key:
                        blacklist.block_item(blocked_key)
                        blacklist.save()
                else:
                    logging.warning(f"{title}: SerpAPI failed. Status {res.status_code}, attempt {attempt + 1}")
            except requests.RequestException as e:
                logging.warning(f"{title}: SerpAPI request exception: {e}, attempt {attempt + 1}")
            time.sleep(1)
    # Final fallback
    if not abstract_result or len(abstract_result) < 10:
        abstract_result = "No abstract available"
        logging.info(f"{title}: Abstract fallback to default.")
    return abstract_result

def extract_keywords(input_str, keyword):
    """Return the text after the first occurrence of *keyword*, stripped.

    When *keyword* is not present in *input_str*, the whole string is
    returned (stripped of surrounding whitespace). Used to drop an LLM's
    reasoning prefix, e.g. everything up to and including ``</think>``.
    """
    _, marker, tail = input_str.partition(keyword)
    remainder = tail if marker else input_str
    return remainder.strip()

def fetch_paper_keywords(papers, config):
    """Generate keywords for each paper's abstract via an LLM and persist them.

    For every paper (grouped by site) with a non-empty ``summary``, the
    configured chat-completion endpoint is called; the reply text after a
    ``</think>`` marker (if any) is stored on the paper dict under
    ``keywords`` and written to the ``papers`` DB table keyed by the
    paper's link. Each LLM call is retried up to 100 times with a 1s
    pause between attempts.

    Args:
        papers: Mapping of site name -> list of paper dicts; each paper
            needs ``summary`` and ``link`` keys.
        config: Parsed config with ``db`` (ip/user/password/schema/port)
            and ``scholar.gpt`` (model/prompt/base_url) sections.

    Returns:
        The same ``papers`` mapping, with ``keywords`` filled in where a
        reply was obtained.
    """
    db_conf = config['db']
    conn = pymysql.connect(
        host=db_conf['ip'],
        user=db_conf['user'],
        password=db_conf['password'],
        database=db_conf['schema'],
        port=db_conf['port']
    )
    cursor = conn.cursor(pymysql.cursors.DictCursor)
    # try/finally guarantees the cursor/connection are released even when
    # an exception escapes the processing loop (the original leaked them).
    try:
        for site, site_papers in papers.items():
            for paper in site_papers:
                paper_abstract = paper['summary'] if paper['summary'] else ''
                if len(paper_abstract) < 1:
                    continue
                gpt_conf = config['scholar']['gpt']
                prompt = gpt_conf['prompt']
                base_url = gpt_conf['base_url']
                payload = {
                    'model': gpt_conf['model'],
                    "messages": [
                        {"role": "user", "content": f"{prompt}:{paper_abstract}"},
                    ]
                }
                for attempt in range(100):
                    try:
                        response = requests.post(base_url, json=payload)
                        if response.status_code == 200:
                            data = response.json()
                            reply = data["choices"][0]["message"]["content"]
                            logging.info(f"{site} Get keyword success.{paper['summary']} with reply = {reply}")
                            # Drop the model's reasoning prefix, keep the keywords.
                            keywords = extract_keywords(reply, "</think>")
                            logging.info(keywords)
                            paper['keywords'] = keywords
                            sql = "update papers set keywords = %s where paper_links = %s"
                            cursor.execute(sql, (keywords, paper['link']))
                            conn.commit()
                            break
                        logging.warning(
                            f"Attempt {attempt + 1}: Failed to get response. Status code: {response.status_code}")
                    except Exception as e:
                        logging.error(f"Attempt {attempt + 1}: {e}")
                    time.sleep(1)
    finally:
        cursor.close()
        conn.close()
    return papers


def get_metadata_by_title(title):
    """Look up paper metadata on Crossref by (fuzzy) title match.

    Args:
        title: Paper title to query.

    Returns:
        A dict with title/authors/DOI/publisher/published/journal/abstract
        for the single best match, or ``None`` when Crossref returns no
        items.

    Raises:
        requests.RequestException: On network failure or timeout.
    """
    url = "https://api.crossref.org/works"
    params = {
        "query.title": title,
        "rows": 1  # return only the single best match
    }
    # Timeout prevents hanging indefinitely on a stalled connection.
    response = requests.get(url, params=params, timeout=10)
    data = response.json()

    if data["message"]["items"]:
        item = data["message"]["items"][0]
        # Crossref list fields can be present but empty; the "or" defaults
        # guard the [0] indexing against IndexError.
        published = item.get("published-print", item.get("published-online", {}))
        metadata = {
            "title": (item.get("title") or [""])[0],
            "authors": [f"{a.get('given', '')} {a.get('family', '')}" for a in item.get("author", [])],
            "DOI": item.get("DOI"),
            "publisher": item.get("publisher"),
            "published": (published.get("date-parts") or [[]])[0],
            "journal": (item.get("container-title") or [""])[0],
            "abstract": item.get("abstract", "No abstract available")  # often absent on Crossref
        }
        return metadata
    else:
        return None

import requests

def get_dblp_metadata(title):
    """Look up publication metadata on DBLP by title search.

    Args:
        title: Paper title to query.

    Returns:
        A dict with title/authors/venue/year/url/doi/abstract for the
        first hit, or ``None`` when DBLP returns no hits. Note: DBLP does
        not provide abstracts, so "abstract" is normally the default text.

    Raises:
        requests.RequestException: On network failure or timeout.
    """
    url = "https://dblp.org/search/publ/api"
    params = {
        "q": title,
        "format": "json"
    }
    # Timeout prevents hanging indefinitely on a stalled connection.
    response = requests.get(url, params=params, timeout=10)
    data = response.json()
    hits = data.get("result", {}).get("hits", {}).get("hit", [])
    if hits:
        info = hits[0]["info"]
        return {
            "title": info.get("title"),
            "authors": info.get("authors", {}).get("author"),
            "venue": info.get("venue"),
            "year": info.get("year"),
            "url": info.get("url"),
            "doi": info.get("doi"),
            "abstract": info.get("abstract", "No abstract available")
        }
    else:
        return None




if __name__ == '__main__':
    # Ad-hoc smoke test: configure logging, then fetch one abstract end-to-end.
    _log_format = '%(asctime)s %(levelname)s %(message)s'
    _log_datefmt = '%Y-%m-%d %H:%M:%S'
    _log_level = logging.INFO
    logging.basicConfig(
        level=_log_level,
        format=_log_format,
        datefmt=_log_datefmt
    )
    title = "The One-Wayness of Jacobi Signatures"
    file_name = 'serpapi_api_key_blacklist.json'
    blacklist = Blacklist.load_blacklist(file_name, block_time="1M")
    # SECURITY: this SerpAPI key is hard-coded in source. It should be moved
    # to an environment variable or config file, and the exposed key rotated.
    serpapi_key = "dccb501df33da7b3d2188a0fad20317c13160d31443c865a55adcd580151f896"
    # Clear any stale blacklist entry so the smoke test actually uses the key.
    blacklist.unblock_item(serpapi_key)
    result = get_paper_abstract(title, {"api-key": serpapi_key})
    print(result)
