import logging
import time

import feedparser
import pymysql
import requests
from pymysql.cursors import DictCursor

from urllib.parse import urlparse

from scholar import get_paper_abstract
from datetime import datetime

def build_dblp_json_api_url(dblp_page_url, type=1):
    """Turn a dblp proceedings/volume page URL into its JSON search-API URL.

    Example:
        "https://dblp.org/db/conf/ndss/ndss2025.html" ->
        "https://dblp.uni-trier.de/search/publ/api?q=toc%3Adb/conf/ndss/ndss2025.bht%3A&h=1000&format=json"

    Args:
        dblp_page_url: a dblp page URL whose path starts with "/db/".
        type: 1 selects the uni-trier mirror (default); any other value
            selects dblp.org. (Name kept for backward compatibility with
            existing ``type=2`` callers, despite shadowing the builtin.)

    Returns:
        The JSON API URL. ``h=1000`` raises the hit limit so that full
        proceedings are returned — the API's small default page size would
        otherwise truncate large volumes (see the intended-URL example above).

    Raises:
        ValueError: if the URL path does not start with "/db/".
    """
    path = urlparse(dblp_page_url).path  # e.g. "/db/conf/ndss/ndss2025.html"
    if not path.startswith("/db/"):
        raise ValueError("链接格式不正确，必须以 /db/ 开头")
    # Drop the leading "/db/" and everything from ".html" onwards.
    toc_key = path[4:].split(".html")[0]  # "conf/ndss/ndss2025"
    host = "dblp.uni-trier.de" if type == 1 else "dblp.org"
    return f"https://{host}/search/publ/api?q=toc%3Adb/{toc_key}.bht%3A&h=1000&format=json"


# # Example call
# url = "https://dblp.org/db/conf/ndss/ndss2025.html"
# print(build_dblp_json_api_url(url))



def _get_with_retry(url, timeout, max_retries):
    """GET `url` with exponential backoff.

    Returns the successful `requests.Response`, or None when every attempt
    failed with a network/HTTP error or an unexpected error occurred
    (retrying a non-network error would not help).
    """
    for attempt in range(max_retries):
        try:
            response = requests.get(url, timeout=timeout)
            response.raise_for_status()  # raise on HTTP 4xx/5xx
            return response
        except (requests.exceptions.RequestException, requests.exceptions.Timeout) as e:
            logging.warning(f"Attempt {attempt + 1} failed for URL {url}: {str(e)}")
            time.sleep(2 ** attempt)  # exponential backoff
        except Exception as e:
            logging.error(f"Unexpected error occurred: {str(e)}")
            return None
    return None


def _fetch_dblp_json(page_link, timeout, max_retries):
    """Fetch the dblp toc JSON for a proceedings/volume page.

    Tries the uni-trier mirror first (type=1), then dblp.org (type=2).

    Returns:
        (data, url): the parsed JSON dict and the URL it came from, or
        (None, url) when both mirrors failed.
    """
    url = None
    for url_type in (1, 2):
        url = build_dblp_json_api_url(page_link, type=url_type)
        response = _get_with_retry(url, timeout, max_retries)
        if response is not None:
            return response.json(), url
        logging.error(f"Failed to fetch data from URL {url} after multiple attempts.")
    return None, url


def _is_latest_entry(cursor, table, name_col, link_col, link):
    """Return 1 if `link`'s pubDate equals the newest pubDate of its venue, else 0.

    `table`/`name_col`/`link_col` come from a fixed internal whitelist
    (conferences/journals below), so f-string interpolation of the
    identifiers is safe; the user-supplied `link` is always bound as a
    query parameter.
    """
    sql = f"""
        SELECT
            CASE
                WHEN pubDate IS NULL THEN 0
                WHEN pubDate = (
                    SELECT MAX(pubDate)
                    FROM {table}
                    WHERE {name_col} = (SELECT {name_col} FROM {table} WHERE {link_col} = %s)
                ) THEN 1
                ELSE 0
            END AS is_latest
        FROM {table}
        WHERE {link_col} = %s
        """
    cursor.execute(sql, (link, link))
    return cursor.fetchone()['is_latest']


def _collect_hits(hits, cursor, serpapi_conf):
    """Convert dblp JSON `hits` into paper dicts (title/link/summary/published/authors).

    Reuses a cached summary from the `papers` table when one exists,
    otherwise falls back to a scholar lookup via get_paper_abstract.
    """
    entries = []
    seen_links = set()
    for hit in hits:
        info = hit['info']
        link = info.get('ee')
        if link is None:
            # Some dblp records (e.g. front matter) carry no electronic edition.
            logging.warning(f"Hit without 'ee' link skipped: {info.get('title')}")
            continue
        if link in seen_links:
            logging.warning(f"Paper {link} has already been fetched. skipped.")
            continue
        # Fix: the original created the dedup set but never added to it,
        # so the duplicate check above could never fire.
        seen_links.add(link)

        authors_data = info.get("authors", {}).get("author", [])
        if not isinstance(authors_data, list):
            # A single author comes back as a bare dict/string, not a list.
            authors_data = [authors_data]
        author_names = [
            author.get("text", "") if isinstance(author, dict) else str(author)
            for author in authors_data
        ]
        formatted_authors = ";".join(author_names) + ";"

        cursor.execute(
            "select count(*) as co, paper_summary from papers where paper_links = %s",
            (link,),
        )
        res = cursor.fetchall()
        logging.info(f"sql = select paper_summary from papers where paper_links = {link}")
        # Truthiness check also guards against a NULL/empty paper_summary
        # (the original's len(None) raised TypeError on NULL columns).
        if res[0]['co'] != 0 and res[0]['paper_summary']:
            summary = res[0]['paper_summary']
        else:
            summary = get_paper_abstract(info['title'], serpapi_conf)
            if summary is None:
                # Previously only the journals branch handled a None abstract.
                summary = "No abstract available"
        entries.append({
            'title': info['title'],        # paper title
            'link': link,                  # paper link
            'summary': summary,            # paper abstract
            'published': info['year'],     # publication year
            'authors': formatted_authors,  # "A;B;C;" author string
        })
        time.sleep(1)  # throttle abstract lookups
    return entries


def deal_with_dblp(db_conf, rss, serpapi_conf, timeout=60, max_retries=5):
    """Expand dblp conference/journal RSS entries into per-paper records.

    For each dblp link that is either unknown to the conferences/journals
    table, or is the venue's latest volume (latest volumes may still
    receive updates), the full paper list is fetched from the dblp search
    API and collected; non-dblp entries are passed through unchanged.
    Newly seen volumes are inserted into the corresponding table.

    Args:
        db_conf: MySQL connection info dict (ip/user/password/schema/port).
        rss: mapping of site name -> list of RSS entry dicts
            (shape produced by fetch_rss_content_v2).
        serpapi_conf: configuration forwarded to get_paper_abstract.
        timeout: per-HTTP-request timeout in seconds.
        max_retries: HTTP retry attempts per dblp mirror.

    Returns:
        dict mapping a result key (the volume title for processed dblp
        entries, otherwise the site name) to a list of paper dicts.
    """
    # Fixed per-branch schema: table, venue-name column, link column, and
    # the key under which the venue name is stored in the RSS entry dict.
    branches = {
        "conf": ("conferences", "conference_name", "link", "conference_name"),
        "journals": ("journals", "journals_name", "links", "journal_name"),
    }
    result = {}
    conn = pymysql.connect(
        host=db_conf['ip'],
        user=db_conf['user'],
        password=db_conf['password'],
        database=db_conf['schema'],
        port=db_conf['port']
    )
    cursor = conn.cursor(pymysql.cursors.DictCursor)
    try:
        for site in rss.keys():
            _res_list = result[site] if site in result else []
            _new_key = site
            for paper in rss[site]:
                if "dblp" not in paper["link"]:
                    # Non-dblp entries (e.g. ieeexplore) already carry full data.
                    _res_list.append(paper)
                    continue
                logging.info(f"deal_with_dblp working on {paper}...")
                branch = next((b for b in branches if b in paper["link"]), None)
                if branch is None:
                    continue  # dblp link that is neither conf nor journals
                table, name_col, link_col, name_key = branches[branch]

                # Known volume? Re-process only the venue's latest one.
                cursor.execute(
                    f"select count(id) as co, title from {table} where {link_col} = %s",
                    (paper["link"],),
                )
                res = cursor.fetchall()
                known = res[0]['co'] != 0
                is_latest = (
                    _is_latest_entry(cursor, table, name_col, link_col, paper['link'])
                    if known else False
                )
                if known and not is_latest:
                    continue  # already-ingested older volume: nothing to do

                _new_key = paper["title"]
                data, url = _fetch_dblp_json(paper["link"], timeout, max_retries)
                if data is None:
                    logging.error(f"Failed to fetch data from URL {url} after multiple attempts. skipped.")
                    continue
                hits = data.get("result", {}).get("hits", {}).get('hit', [])
                if not hits:
                    logging.warning(f"No papers found in response for URL {url}")
                    continue
                _res_list.extend(_collect_hits(hits, cursor, serpapi_conf))

                if not is_latest:
                    # First sighting of this volume: record it.
                    logging.info(f"{table} {paper} is not latest = {is_latest}")
                    cursor.execute(
                        f"insert into {table} ({name_col}, title, {link_col}, pubDate) VALUES (%s, %s, %s, %s)",
                        (paper[name_key], paper["title"], paper["link"], paper["pubDate"]),
                    )
                    conn.commit()
            result[_new_key] = _res_list
    finally:
        # Fix: the original leaked the connection/cursor on any exception.
        cursor.close()
        conn.close()
    return result


def fetch_rss_content(sites):
    """Fetch RSS entries for every site.

    Thin wrapper kept for backward compatibility with existing callers;
    all the work happens in fetch_rss_content_v2 (the superseded v1
    implementation that used to live here as commented-out code has been
    removed).
    """
    return fetch_rss_content_v2(sites)

def fetch_rss_content_v2(sites, max_retries=30, timeout=60, backoff_factor=0.5):
    """Fetch and parse the RSS feed of each site.

    Args:
        sites: list of dicts with 'site_name' and 'rss_url' keys.
        max_retries: download attempts per feed before giving up.
        timeout: per-request timeout in seconds (non-.xml URLs only).
        backoff_factor: base of the exponential retry delay.

    Returns:
        dict mapping site_name -> list of entry dicts. dblp entries carry
        conference_name/journal_name, link, title and pubDate; ieeexplore
        entries carry title/link/summary/published/authors.
    """
    results = {}
    for site in sites:
        site_name = site['site_name']
        rss_url = site['rss_url']
        _rss_list = results.setdefault(site_name, [])
        logging.info(f"{site_name} fetching...")

        feed = None
        for attempt in range(max_retries):
            try:
                if 'xml' in rss_url.lower():
                    # Let feedparser download .xml feeds itself.
                    feed = feedparser.parse(rss_url)
                else:
                    # Download via requests (supports timeout), then parse.
                    response = requests.get(rss_url, timeout=timeout)
                    response.raise_for_status()  # raise on HTTP 4xx/5xx
                    feed = feedparser.parse(response.content)
                break  # success
            except (requests.exceptions.RequestException,
                    requests.exceptions.Timeout) as e:
                wait_time = backoff_factor * (2 ** attempt)  # exponential backoff
                logging.warning(
                    f"Attempt {attempt + 1} failed for {site_name}: {str(e)}. Retrying in {wait_time:.1f}s...")
                time.sleep(wait_time)
            except Exception as e:
                # Non-network failure: retrying will not help.
                logging.error(f"Unexpected error for {site_name}: {str(e)}")
                break
        if feed is None:
            # Fix: the original only handled retry exhaustion (for/else); an
            # unexpected-exception break left `feed` unbound and raised
            # NameError below. Cover both failure paths here.
            logging.error(f"All {max_retries} attempts failed for {site_name}")
            continue

        logging.info(f"{site_name} feed {feed}...")
        if len(feed['entries']) == 0:
            # An empty result sometimes means a transient hiccup: retry briefly.
            for times in range(10):
                time.sleep(1)
                feed = feedparser.parse(rss_url)
                logging.info(f"{site_name} feed error, retry {times} times...")
                if len(feed['entries']) != 0:
                    break

        for entry in feed.entries:
            if "ieeexplore" in rss_url:
                if entry.summary is None or entry.summary == 'null':
                    continue
                _rss_list.append({
                    'title': entry.title,          # paper title
                    'link': entry.link,            # paper link
                    'summary': entry.summary,      # paper abstract
                    'published': entry.published,  # publication date
                    'authors': entry.authors       # authors
                })
            elif "dblp" in rss_url:
                try:
                    # NOTE: output keeps the original "%H-%M-%S" hyphens in
                    # the time part — the downstream MySQL DATETIME parser
                    # accepts it, and stored rows already use this shape.
                    pub_date = datetime.strptime(
                        entry.published, "%a, %d %b %Y %H:%M:%S %z"
                    ).strftime("%Y-%m-%d %H-%M-%S")
                except (AttributeError, ValueError) as e:
                    # Robustness: skip entries without a parseable RFC-822
                    # date instead of aborting the whole site fetch.
                    logging.warning(f"{site_name}: unparseable pubDate for {entry.get('title')}: {e}")
                    continue
                if "conf" in rss_url:
                    _rss_list.append({
                        "conference_name": site_name,
                        "link": entry.link,
                        "title": entry.title,
                        "pubDate": pub_date
                    })
                elif "journals" in rss_url:
                    _rss_list.append({
                        "journal_name": site_name,
                        "link": entry.link,
                        "title": entry.title,
                        "pubDate": pub_date
                    })
                else:
                    logging.warning(f"Unexpected error for {site_name}: {str(entry)}")

        results[site_name] = _rss_list
        logging.info(f"{site_name} done!")
        logging.info(f"{site_name} haves {results[site_name]}!")

    return results

# # Function: generate SQL INSERT statements (dead code, kept for reference)
# def generate_insert_sql(data):
#     insert_statements = []
#
#     # Iterate over each conference category
#     for conference_name, entries in data.items():
#         # Iterate over each record in that category
#         for entry in entries:
#             title = entry['title']
#             link = entry['link']
#             # Format as a SQL VALUES tuple
#             # NOTE(review): values are string-interpolated, not parameterized —
#             # do not revive this for untrusted input (SQL injection risk).
#             insert_statements.append(f"('{conference_name}', '{title}', '{link}')")
#
#     # Combine into a complete INSERT statement
#     sql_query = "INSERT INTO conferences (conference_name, title, link) VALUES\n"
#     sql_query += ",\n".join(insert_statements) + ";"
#
#     return sql_query


if __name__ == '__main__':
    # Manual smoke test: pull two cryptography conference feeds and dump
    # whatever the fetcher returns.
    crypto_sites = [
        {
            "site_name": "CRYPTO",
            "rss_url": "https://dblp.org/feed/streams/conf/crypto.rss",
        },
        {
            "site_name": "EUROCRYPT",
            "rss_url": "https://dblp.org/feed/streams/conf/eurocrypt.rss",
        },
    ]
    fetched = fetch_rss_content(crypto_sites)
    print(fetched)