#将正式环境古诗词 导到测试
import datetime
import pymysql
import requests
import json

from dbutils.pooled_db import PooledDB


# Beta / test database
def getBetaConnection():
    """Return a (connection, cursor) pair for the beta (test) database.

    BUG FIX: the original created a new PooledDB on every call, which defeats
    connection pooling entirely — the pool is now created once per process and
    cached on the function object.
    """
    if getattr(getBetaConnection, "_pool", None) is None:
        getBetaConnection._pool = PooledDB(
            pymysql, 1,  # 1 = minimum number of connections kept in the pool
            host='172.20.150.110', user='test_movie', passwd='movie20189933',
            db='cc_media_source', port=4308)
    conn = getBetaConnection._pool.connection()
    cur = conn.cursor()
    return conn, cur

# Production database (read-only account)
def getProdConnection():
    """Return a (connection, cursor) pair for the production database.

    BUG FIX: the original created a new PooledDB on every call, which defeats
    connection pooling entirely — the pool is now created once per process and
    cached on the function object.
    """
    if getattr(getProdConnection, "_pool", None) is None:
        getProdConnection._pool = PooledDB(
            pymysql, 1,  # 1 = minimum number of connections kept in the pool
            host='193.112.206.244', user='developer_read', passwd='developer_read#coocaa',
            db='cc_media_source', port=3306)
    conn = getProdConnection._pool.connection()
    cur = conn.cursor()
    return conn, cur

def save_poetry_to_api(poetry_list):
    """Send a batch of poems to the add-poetry API and report the outcome."""
    url = "http://localhost:8080/api/ancientPoetry/addPoetry/invoke"
    headers = {'Content-Type': 'application/json'}
    payload = json.dumps(poetry_list)
    # NOTE(review): a JSON body is sent with a GET request — many servers
    # ignore GET bodies; confirm the endpoint really expects GET, not POST.
    response = requests.request("GET", url, headers=headers, data=payload)
    if response.status_code == 200:
        print("数据保存成功")
    else:
        print(f"数据保存失败，状态码：{response.status_code}")

def update_poetry_to_api(poetry_data):
    """Send a single poem-update payload to the update-poetry API.

    BUG FIX: the failure branch printed "数据保存失败" (save failed), copied
    from save_poetry_to_api — corrected to "数据更新失败" (update failed).
    """
    #url = "http://localhost:8080/api/ancientPoetry/updatePoetry/invoke"
    url = "http://106.55.8.90:7001/api/ancientPoetry/updatePoetry/invoke"
    headers = {'Content-Type': 'application/json'}
    # NOTE(review): a JSON body is sent with a GET request — many servers
    # ignore GET bodies; confirm the endpoint really expects GET, not POST.
    response = requests.request("GET", url, headers=headers, data=json.dumps(poetry_data))
    if response.status_code == 200:
        print("数据更新成功")
    else:
        print(f"数据更新失败，状态码：{response.status_code}")


## Copy the ancient poems from production over to the test environment.
def copyPoetry():
    """Page through every 'tencent'-sourced poem in production and push each
    batch to the test environment through the save API."""
    conn, cur = getProdConnection()
    try:
        # How many rows there are to migrate in total.
        cur.execute("SELECT COUNT(*) FROM cc_ancient_poetry t where t.source = 'tencent' ")
        total_count = cur.fetchone()[0]

        batch_size = 50
        offset = 0
        # API field names, in the same order as the SELECT column list below.
        columns = ("dynasty", "author", "title", "content", "mean", "md5",
                   "playUrl", "source", "simtableId", "originalData")

        while offset < total_count:
            # Page through the table with LIMIT/OFFSET.
            select_sql = f"""
            SELECT dynasty, author, title, content,mean, md5, play_url , `source`,simtable_id,original_data
            FROM cc_ancient_poetry where source = 'tencent' 
            LIMIT {batch_size} OFFSET {offset}
            """
            cur.execute(select_sql)
            rows = cur.fetchall()

            if not rows:
                break  # no more data — stop paging

            batch = [dict(zip(columns, row)) for row in rows]
            print(f"第{offset}序号的数据进行保存")
            # Push this batch to the API.
            save_poetry_to_api(batch)

            # Advance to the next page.
            offset += batch_size

    finally:
        # Always release the cursor and connection.
        cur.close()
        conn.close()


# Strip the leading colon from the translation (`mean`) field of tencent-sourced rows.
def updatePoetry():
    """Remove the leading ':' from `mean` on tencent-sourced poems, pushing
    each fix through the update API.

    BUG FIX: the original advanced OFFSET on every pass, but updated rows no
    longer match the WHERE clause (their mean stops starting with ':'), so the
    result set shrinks while OFFSET grows — every other batch was skipped.
    Always read from offset 0 and let processed rows drop out of the query.

    NOTE(review): assumes the update API writes synchronously to the same
    database the SELECT reads from — TODO confirm. If updates lag, the
    no-progress guard below stops the loop early rather than spinning forever.
    """
    conn, cur = getProdConnection()
    try:
        batch_size = 50
        num = 0
        last_first_id = None
        while True:
            select_sql = f"""
            select t.id, SUBSTRING(t.mean,2) mean from cc_ancient_poetry t where t.source = 'tencent'  and t.mean is not null and LEFT(t.mean,1) = ':'  
            LIMIT {batch_size} OFFSET 0
            """
            cur.execute(select_sql)
            results = cur.fetchall()

            if not results:
                # No matching rows left — done.
                break

            # No-progress guard: seeing the same leading row twice means the
            # remote updates are not landing; bail out instead of looping.
            if results[0][0] == last_first_id:
                break
            last_first_id = results[0][0]

            for row_id, mean in results:
                num += 1
                print(f"第{num}条数据id:{row_id},更新数据")
                update_poetry_to_api({"id": row_id, "mean": mean})

    finally:
        # Always release the cursor and connection.
        cur.close()
        conn.close()


# Tencent source: change author '无名氏' (nameless) to '佚名' (anonymous).
def updatePoetry2():
    """Rename author '无名氏' to '佚名' on tencent-sourced poems via the
    update API.

    BUG FIX: the original advanced OFFSET on every pass, but renamed rows no
    longer match `author = '无名氏'`, so the result set shrinks while OFFSET
    grows — every other batch was skipped. Always read from offset 0 and let
    processed rows drop out of the query.

    NOTE(review): assumes the update API writes synchronously to the same
    database the SELECT reads from — TODO confirm. If updates lag, the
    no-progress guard below stops the loop early rather than spinning forever.
    """
    conn, cur = getProdConnection()
    try:
        batch_size = 50
        num = 0
        last_first_id = None
        while True:
            select_sql = f"""
            select t.id from cc_ancient_poetry t where t.author = '无名氏' and t.source ='tencent' 
            LIMIT {batch_size} OFFSET 0
            """
            cur.execute(select_sql)
            results = cur.fetchall()

            if not results:
                # No matching rows left — done.
                break

            # No-progress guard: seeing the same leading row twice means the
            # remote updates are not landing; bail out instead of looping.
            if results[0][0] == last_first_id:
                break
            last_first_id = results[0][0]

            for (row_id,) in results:
                num += 1
                print(f"第{num}条数据id:{row_id},更新数据")
                update_poetry_to_api({"id": row_id, "author": '佚名'})

    finally:
        # Always release the cursor and connection.
        cur.close()
        conn.close()


# Fix broken content — see: select * from cc_ancient_poetry t where t.content like '%(%' and t.content like '%一作%';
def updatePoetry4():
    """Reset tts_status to 0 for checked poems (check_status = 1) on the test
    database — deliberately processes only the first batch."""
    conn, cur = getBetaConnection()
    try:
        offset = 0
        batch_size = 100
        num = 0
        while True:
            # Page through the table with LIMIT/OFFSET.
            select_sql = f"""
            select t.id,t.content from cc_ancient_poetry t where t.check_status = 1 
            LIMIT {batch_size} OFFSET {offset}
            """
            cur.execute(select_sql)
            rows = cur.fetchall()

            if not rows:
                break  # no more data — stop paging

            for poem_id, poem_content in rows:
                num += 1
                print(f"第{num}条数据id:{poem_id},更新数据")
                # Push this row's update to the API.
                update_poetry_to_api({
                    "id": poem_id,
                    "content": poem_content,
                    "tts_status": 0
                })

            offset += batch_size
            # Intentionally run a single batch only.
            break

    finally:
        # Always release the cursor and connection.
        cur.close()
        conn.close()


# Set tts_status = 1 for rows from the 'tencent' source (id >= 237573).
def updatePoetry5():
    """Mark rows with id >= 237573 as already having audio
    (tts_status = 1, tts_desc = '腾讯源已有音频') via the update API.

    FIX: the original also selected and bound t.content, which was never used
    in the payload — only t.id is fetched now. OFFSET pagination is safe here
    because the WHERE filter (t.id >= 237573) is not changed by the updates.
    """
    conn, cur = getProdConnection()
    try:
        offset = 0
        batch_size = 100
        num = 0
        while True:
            # Page through the table with LIMIT/OFFSET.
            select_sql = f"""
            select t.id from cc_ancient_poetry t where t.id >=237573
            LIMIT {batch_size} OFFSET {offset}
            """
            cur.execute(select_sql)
            results = cur.fetchall()

            if not results:
                # No more data — stop paging.
                break

            for (row_id,) in results:
                num += 1
                print(f"第{num}条数据id:{row_id},更新数据")
                update_poetry_to_api({
                    "id": row_id,
                    "tts_status": 1,
                    "tts_desc": "腾讯源已有音频"
                })

            # Advance to the next page.
            offset += batch_size

    finally:
        # Always release the cursor and connection.
        cur.close()
        conn.close()

# Repair the rows whose author was wrongly updated.
def updatePoetry3():
    """For a fixed list of ids, re-read the author from each row's
    original_data JSON and push the corrected author through the update API.

    FIXES: removed the dead offset/batch_size pagination variables (there is
    no loop here) and the unused poetry_list accumulator; rows with a NULL or
    empty original_data are now skipped instead of crashing json.loads.
    """
    conn, cur = getProdConnection()
    try:
        num = 0
        select_sql = """
        select t.id,t.title,t.original_data from cc_ancient_poetry t where t.id in
        (194759,194781,194796,194798,194802,194808,194818,194819,194820,194821,194833,194872,194873,194874,194875,194877,194878,194879,194881,194882,194884,194888,194897,195598,195602,196307,198120,200108,200330,201251,201745,201747,202768,204152,204292,205781,206527,207118,208923,220404,221158,221279,228735,229479,229510,230615,232368,232877,233705,236885,238236,251181,254714)
        """
        cur.execute(select_sql)

        for row_id, title, original_data in cur.fetchall():
            if not original_data:
                # Defensive: skip rows missing original_data instead of crashing.
                continue
            # original_data is a JSON document with a top-level 'author' key.
            author = json.loads(original_data)['author']

            num += 1
            print(f"第{num}条数据,id:{row_id},title:{title},author:{author}")
            # Push the corrected author to the API.
            update_poetry_to_api({"id": row_id, "author": author})

    finally:
        # Always release the cursor and connection.
        cur.close()
        conn.close()

if __name__ == '__main__':
    # Entry point: currently runs only the tts_status back-fill; the
    # production-to-test copy stays commented out.
    #copyPoetry()
    updatePoetry5()