# Set topic status
import datetime
import pymysql
import requests
import json
import random


from concurrent.futures import ThreadPoolExecutor
from dbutils.pooled_db import PooledDB


# Test database
def getBetaConnection():
    """Return a (connection, cursor) pair for the beta/test MySQL database.

    The connection pool is created once and cached on the function itself;
    the original rebuilt a fresh PooledDB on every call, which defeats the
    point of pooling. Callers still receive a pooled connection and cursor
    and remain responsible for closing both.
    """
    # NOTE(review): credentials are hard-coded — consider moving them to
    # environment variables or a config file.
    pool = getattr(getBetaConnection, "_pool", None)
    if pool is None:
        # 1 = minimum number of idle connections kept in the pool
        pool = PooledDB(pymysql, 1, host='172.20.150.110', user='test_movie', passwd='test_movie154104',
                        db='cc_media_source', port=4308)
        getBetaConnection._pool = pool
    conn = pool.connection()
    cur = conn.cursor()
    return conn, cur

# Production database
def getProdConnection():
    """Return a (connection, cursor) pair for the production MySQL database.

    The connection pool is created once and cached on the function itself;
    the original rebuilt a fresh PooledDB on every call, which defeats the
    point of pooling. Callers still receive a pooled connection and cursor
    and remain responsible for closing both.
    """
    # NOTE(review): credentials are hard-coded — consider moving them to
    # environment variables or a config file.
    pool = getattr(getProdConnection, "_pool", None)
    if pool is None:
        # 1 = minimum number of idle connections kept in the pool
        pool = PooledDB(pymysql, 1, host='193.112.206.244', user='developer_read', passwd='developer_read#coocaa',
                        db='cc_media_source', port=3306)
        getProdConnection._pool = pool
    conn = pool.connection()
    cur = conn.cursor()
    return conn, cur


# Set topic status
def updateTopic():
    """Page through topics with status=1 and check_status=2 and push their
    ids to the remote sync API in batches of `batch_size`.

    NOTE(review): if the remote sync call changes `check_status` so that
    rows drop out of this WHERE clause, OFFSET-based paging will skip
    records on later pages — confirm whether the API mutates check_status,
    and if so, page with `OFFSET 0` each iteration instead.
    """
    # conn, cur = getBetaConnection()
    conn, cur = getProdConnection()
    try:
        # Total row count was measured once up front and hard-coded;
        # the loop also stops early as soon as a page comes back empty.
        total_count = 24433
        batch_size = 100
        offset = 0

        while offset < total_count:
            # Paged query. batch_size/offset are locally-built integers,
            # so the f-string interpolation is not an injection risk here.
            select_sql = f"""
                select t.id from cc_smart_topic t where t.`status` = 1 and t.check_status = 2 order by t.modify_time  
                LIMIT {batch_size} OFFSET {offset}
                """
            cur.execute(select_sql)
            results = cur.fetchall()

            if not results:
                # No more data — stop paging.
                break

            # First column of each row is the topic id; comprehension
            # avoids shadowing the builtin `id`.
            ids = [row[0] for row in results]
            print(f"第{offset}序号的数据进行更新")
            syncTopicCheckStatus(ids)

            # Advance to the next page.
            offset += batch_size

    finally:
        # Always release the pooled connection, even on error.
        cur.close()
        conn.close()

def split_ids_into_chunks(ids, chunk_size=50):
    """Split *ids* into runs of at most *chunk_size* items and render each
    run as a comma-separated string.

    Returns a list of strings; an empty *ids* yields an empty list.
    """
    joined = []
    for start in range(0, len(ids), chunk_size):
        window = ids[start:start + chunk_size]
        joined.append(",".join(str(item) for item in window))
    return joined

def syncTopicCheckStatus(ids):
    """Fan the id list out to the sync API, 50 ids per request, with at
    most 10 HTTP requests in flight at once."""
    chunked = split_ids_into_chunks(ids, chunk_size=50)
    with ThreadPoolExecutor(max_workers=10) as pool:
        pool.map(syncTopicCheckStatus_api, chunked)


def syncTopicCheckStatus_api(ids_str):
    """Fire one GET request at a randomly chosen API host to sync the check
    status for the comma-separated id string *ids_str*.

    Best-effort: any exception is printed and swallowed; the HTTP response
    body and status code are intentionally ignored.
    """
    url_list = [
        "http://106.52.201.90:7001/api/ccSmart/syncTopicCheckStatusByIds/invoke",
        "http://106.55.8.90:7001/api/ccSmart/syncTopicCheckStatusByIds/invoke",
        "http://106.53.146.53:7001/api/ccSmart/syncTopicCheckStatusByIds/invoke"  # three distinct hosts serving the same endpoint
    ]

    # Pick one host at random — a crude client-side load-balancing strategy.
    selected_url = random.choice(url_list)
    url = f"{selected_url}?ids={ids_str}"
    # Request headers (none needed for this endpoint).
    headers = {}
    try:
        print(f"发起有效状态更新,url:{url}")
        # Short timeout keeps a slow host from blocking the worker thread;
        # NOTE(review): 1s may be too aggressive — confirm typical API latency.
        requests.get(url, headers=headers, timeout=1)
        print(f"Request sent with ids_str={ids_str}")
    except Exception as e:
        print(f"Error with ids_str={ids_str}: {e}")


if __name__ == '__main__':
    updateTopic()