import requests
import json
import time
import MySQLdb


def get_ip():
    """Fetch one proxy IP from the KuaiDaiLi (KDL) API and build a
    requests-style proxies mapping using the tunnel account credentials.

    Returns:
        dict: ``{'http': ..., 'https': ...}`` proxy URLs for ``requests``.

    Raises:
        requests.RequestException: if the KDL API call fails or times out.
        KeyError, IndexError: if the API response lacks the expected fields.
    """
    # NOTE(review): the secret_id/signature and the proxy account credentials
    # are hard-coded; consider moving them to environment variables.
    kdl_url = "https://dps.kdlapi.com/api/getdps/?secret_id=o6ykpp1tqja9zj9g4947&signature=ne52s9ngdvy61nrghkzf7kytjk7c3qgk&num=1&format=json&sep=1"
    # Timeout so a dead endpoint cannot hang the crawler forever, and an
    # explicit status check so HTTP errors surface as exceptions instead of
    # opaque JSON/KeyError failures downstream.
    resp = requests.get(kdl_url, timeout=10)
    resp.raise_for_status()
    ip = resp.json()["data"]["proxy_list"][0]
    proxies = {
        # format: "http://user:password@ip:port"
        'http': f'http://d2769292027:8bnrqo2g@{ip}',
        'https': f'http://d2769292027:8bnrqo2g@{ip}',
    }
    return proxies


# Load the author names.
# Read author.json (expected to be a JSON array).
with open("author.json", "r", encoding="utf-8") as f:
    authors_data = json.load(f)

# Extract the "v" field of each top-level item (e.g. ["王伟", "张磊", "刘伟", ...]).
author_names = [item["v"] for item in authors_data]
# `authors` is the flat list of author names iterated by the crawl loop below.
# NOTE(review): this comprehension treats each element of author_names as an
# iterable of {"v": ...} dicts and shadows the outer `authors_data` name; if
# author_names already held plain strings (as the comment above suggests),
# this line would raise TypeError — confirm against author.json's real shape.
authors = [author["v"] for authors_data in author_names for author in authors_data]

# Database configuration.
db_config = {
    'host': 'localhost',
    'user': 'root',  # replace with your username
    'password': '',  # replace with your password
    'database': 'bilibili',  # replace with your database name
    'port': 3307,
    'autocommit': True  # auto-commit transactions
}
# Open the database connection (module-level; closed at the end of the script).
conn = MySQLdb.connect(**db_config)
cursor = conn.cursor()
print("数据库连接成功")

# Browser-like request headers copied from a real NSTL search session.
headers = {
    "Accept": "application/json, text/javascript, */*; q=0.01",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
    "Connection": "keep-alive",
    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
    "Origin": "https://www.nstl.gov.cn",
    "Referer": "https://www.nstl.gov.cn/resources_search.html?t=DegreePaper",
    "Sec-Fetch-Dest": "empty",
    "Sec-Fetch-Mode": "cors",
    "Sec-Fetch-Site": "same-origin",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0",
    "X-Requested-With": "XMLHttpRequest",
    "sec-ch-ua": "\"Microsoft Edge\";v=\"137\", \"Chromium\";v=\"137\", \"Not/A)Brand\";v=\"24\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Windows\""
}
# Session cookies captured from the browser.
# NOTE(review): these tokens are session-bound and will expire; refresh them
# when the API starts rejecting requests.
cookies = {
    "NCK": "PYWLWV2LWG5JE5VLH4YRDDWXBMELLJTA",
    "libcode": "CN000000",
    "C": "0",
    "nstl_rsp": "",
    "nstl_token": "564a9e6a-c344-801b-a48b-35d66af97c5d",
    "SESSION": "MTU3MWZjZDQtMGQyZC00MjEwLWI2ZmUtM2FkMDdlYWQ2ODQ0"
}
# NSTL search endpoint; `params` routes the call to the paper-list service.
url = "https://www.nstl.gov.cn/api/service/nstl/web/execute"
params = {
    "target": "nstl4.search4",
    "function": "paper/pc/list/pl"
}

# POST form payload. `query` and `facetRelation` are embedded JSON strings
# whose author field (hasAutnam_s) is rewritten per author in the loop below;
# `pageNumber` is rewritten per page. The initial "王伟" values are placeholders.
data = {
    "query": "{\"c\":10,\"st\":\"0\",\"f\":[],\"p\":\"\",\"q\":[{\"k\":\"uni_s\",\"a\":1,\"o\":\"\",\"f\":1,\"v\":\"山东大学\"},{\"k\":\"hasAutnam_s\",\"a\":1,\"o\":\"\",\"f\":1,\"v\":\"王伟\"}],\"op\":\"AND\",\"s\":[\"nstl\",\"haveAbsAuK:desc\",\"yea:desc\",\"score\"],\"t\":[\"DegreePaper\"]}",
    "webDisplayId": "11",
    "sl": "1",
    "searchWordId": "430098e29f4eb4c47bff8e9129582135",
    "searchId": "939ec8870b055bdab21f3d9a8338d3ad",
    "facetRelation": "[{\"id\":\"a3b8c1874555d4f7f0d62f165dfe97f0\",\"sequence\":1,\"field\":\"uni_s\",\"name\":\"院校\",\"value\":\"山东大学\"},{\"id\":\"179a1e094c5a12b0ddc9db606f20b832\",\"sequence\":1,\"field\":\"hasAutnam_s\",\"name\":\"作者\",\"value\":\"王伟\"}]",
    "pageSize": "30",
    "pageNumber": "1"
}

# Acquire an initial proxy IP.
proxies = get_ip()
error_count = 0  # consecutive-error counter
max_errors_before_change = 4  # errors tolerated before rotating the proxy IP

# Parameterized INSERT; loop-invariant, so build it once.
sql = """INSERT INTO tb_wenxian (title, auther, teacher)VALUES (%s, %s, %s)"""

for author in authors:
    # 1. Rewrite data["query"]: substitute the current author name.
    query_dict = json.loads(data["query"])
    for item in query_dict["q"]:  # walk the query conditions
        if item["k"] == "hasAutnam_s":  # author field
            item["v"] = author
    data["query"] = json.dumps(query_dict, ensure_ascii=False)  # back to a JSON string

    # 2. Rewrite data["facetRelation"] the same way.
    facet_relation_list = json.loads(data["facetRelation"])
    for item in facet_relation_list:
        if item["field"] == "hasAutnam_s":
            item["value"] = author
    data["facetRelation"] = json.dumps(facet_relation_list, ensure_ascii=False)

    page_number = 1  # start from the first page

    while True:
        try:
            # Request the current page.
            data['pageNumber'] = str(page_number)

            # Rows parsed from this page, inserted in one executemany batch.
            cache_data_list = []

            response = requests.post(url, headers=headers, cookies=cookies, params=params, data=data, proxies=proxies,
                                     timeout=10)
            json_data = response.json()

            # A missing 'data' field means this author's papers are exhausted.
            if 'data' not in json_data:
                print(f"第{page_number}页: 响应缺少data字段,表明该作者文献已全部保存")
                break
            data_list = json_data["data"]

            # An empty page also means we are done; without this check the
            # loop would keep requesting ever-higher page numbers forever.
            if not data_list:
                break

            # Parse the page: each record is a list of field dicts keyed by "f".
            for temp in data_list:
                title = auther = teacher = None
                for val in temp:
                    if val.get("f") == "tit":
                        title = val.get("v")[0]
                    if val.get("f") == "hasAut":
                        auther = val.get("v")[0][-1]["v"][0]
                    if val.get("f") == "hasTut":
                        teacher = val.get("v")[0][-1]["v"][0]
                cache_data_list.append((title, auther, teacher))
                print(title, auther, teacher)
            # Page finished; advance to the next one.
            print(f"第 {page_number} 页: 共{len(cache_data_list)}条数据。")
            page_number += 1

            # Persist the batch.
            cursor.executemany(sql, cache_data_list)
            conn.commit()
            print(f"第{page_number - 1}页数据已完成插入,已存入数据库{len(cache_data_list)}")

        except MySQLdb.Error as e:
            # Database failures are not retried: abandon this author.
            print(f"数据库错误: {e}")
            break
        except Exception as e:
            # Network/parse errors: count them, rotate the proxy after the
            # threshold, then retry the same page after a short pause.
            error_count += 1
            print(f"\n发生错误 ({error_count}/{max_errors_before_change}): {type(e).__name__}")
            print(f"错误详情: {str(e)}")
            if error_count >= max_errors_before_change:
                print("\n⚠️ 错误次数达到阈值，正在更换IP...")
                proxies = get_ip()
                error_count = 0  # reset the counter
                print(f"新IP: {proxies.get('http', '未知')}")
            # Wait 3 seconds, then retry.
            time.sleep(3)

# Close the database cursor and connection.
cursor.close()
conn.close()
print("数据库连接已关闭")
