import json
import queue
import random
import threading
from concurrent.futures import ThreadPoolExecutor
from DrissionPage import Chromium, ChromiumOptions
from collections import defaultdict
import pandas as pd
from flask import Flask, request, jsonify, Response
from auto_bid import handle_queue_data_to_auto_bid
import pymysql
from loguru import logger
from mysql_config import db_config_dev, db_config_prod  # 导入配置文件中的数据库配置


# Global lock serializing browser startup across worker threads (see worker()).
lock = threading.Lock()
app = Flask(__name__)

# Active database configuration; switch the comment to target production.
db_config = db_config_dev
# db_config = db_config_prod


# For every campaign_id, select the single most recent bid-operate row
# (types 1/2/5 only) that has a non-null seller_id, newest sellers first.
# The self-join on (campaign_id, MAX(time)) picks the latest row per campaign.
execute_sql = '''SELECT t1.* FROM jg_campaign_bid_operate t1
                    INNER JOIN (
                        SELECT campaign_id, MAX(time) AS max_time
                            FROM jg_campaign_bid_operate
                        WHERE type IN (1, 2, 5)
                            GROUP BY campaign_id
                    ) t2 ON t1.campaign_id = t2.campaign_id AND t1.time = t2.max_time
                WHERE t1.type IN (1, 2, 5) and t1.seller_id is not null ORDER BY t1.seller_id desc'''


@app.route('/test_chinese')
def test_chinese():
    """Smoke-test endpoint: verify Chinese text is returned un-escaped as UTF-8 JSON."""
    payload = json.dumps({'测试': '这是一条中文消息'}, ensure_ascii=False)
    return Response(payload, mimetype='application/json; charset=utf-8')

# Ports already handed out in this process; guarded by port_lock.
used_ports = set()
port_lock = threading.Lock()


def get_random_port():
    """Return a random port in [50000, 65535] not yet handed out by this process.

    Thread-safe: reservation (check + add) happens under port_lock, so two
    threads can never receive the same port.

    Returns:
        int: a port number in the range 50000-65535.

    Raises:
        RuntimeError: if every port in the range has already been reserved
            (previously this case looped forever).
    """
    pool_size = 65535 - 50000 + 1
    while True:
        port = random.randint(50000, 65535)
        with port_lock:
            if len(used_ports) >= pool_size:
                raise RuntimeError("no free port left in range 50000-65535")
            if port not in used_ports:
                used_ports.add(port)
                return port
def worker(seller_id, items, connection):
    """Process one seller's pending bid operations in a dedicated browser.

    Starts a Chromium instance on a fresh debug port, navigates it to the
    campaign page for `seller_id`, then feeds `items` through a local queue
    into the auto-bid handler.

    Args:
        seller_id: sub-account id whose campaigns are being processed.
        items: list of row dicts (one per campaign operation) for this seller.
        connection: shared pymysql connection passed through to the handler.
            NOTE(review): pymysql connections are not thread-safe — confirm
            the handler serializes its use.
    """
    try:
        # Browser startup is serialized: concurrent Chromium launches against
        # the same user profile interfere with each other.
        with lock:  # replaces manual acquire()/release(); releases on any exception
            port = get_random_port()
            co = ChromiumOptions().set_local_port(port)
            browser = Chromium(co)
            # Open the campaign page for this seller.
            create_plan_tab = initial_chromium(browser, seller_id)

        # Queue this seller's items and hand them to the auto-bid handler.
        local_queue = queue.Queue()
        for item in items:
            local_queue.put(item)
        handle_queue_data_to_auto_bid(queue=local_queue, plan_tab=create_plan_tab, connection=connection)

    except Exception as e:
        # Top-level boundary for the thread: log and swallow so one seller's
        # failure does not kill the pool.
        logger.error(f"线程执行失败: {e}")


@app.route('/get_campaign_data', methods=['GET'])
def get_campaign_data():
    """Fetch the latest bid operations per campaign and fan them out per seller.

    Query parameter:
        seller_id: required; echoed back in the response message.

    Returns:
        200 with a UTF-8 JSON message listing the queued data,
        400 if seller_id is missing,
        500 on any database/processing error.
    """
    seller_id = request.args.get('seller_id')

    if not seller_id:
        return jsonify({'error': 'Missing seller_id parameter'}), 400

    connection = None  # explicit init so `finally` is safe even if connect() fails
    try:
        connection = pymysql.connect(**db_config)
        with connection.cursor() as cursor:
            cursor.execute(execute_sql)
            result = cursor.fetchall()
            # Read column names while the cursor is still open (it is closed
            # when the `with` block exits).
            column_names = [desc[0] for desc in cursor.description]

        # Convert rows to dicts and group them by sub-account id.
        data_list = [dict(zip(column_names, row)) for row in result]
        grouped_data = defaultdict(list)
        for item in data_list:
            grouped_data[item['seller_id']].append(item)

        # Multithreaded version: one worker per seller.
        # Guard: ThreadPoolExecutor raises ValueError for max_workers=0,
        # which previously happened whenever the query returned no rows.
        if grouped_data:
            # NOTE(review): a single pymysql connection is shared across all
            # worker threads; pymysql connections are not thread-safe — verify
            # downstream usage or give each worker its own connection.
            with ThreadPoolExecutor(max_workers=len(grouped_data)) as executor:
                # Loop variable renamed from `seller_id` to `sid`: the old name
                # shadowed the request parameter, so the response message below
                # reported the last seller processed instead of the one asked for.
                for sid, items in grouped_data.items():
                    executor.submit(worker, sid, items, connection=connection)

        ## Sequential version (kept for debugging)
        # for sid, items in grouped_data.items():
        #     worker(sid, items, connection=connection)

        response = f"以下子账号ID: {seller_id}的所属数据: {data_list}已加入队列等待处理"

        return Response(
            json.dumps(response, ensure_ascii=False),
            mimetype='application/json; charset=utf-8'
        )

    except Exception as e:
        # Log the full error before returning a sanitized 500.
        logger.error(f"错误类型: {type(e).__name__}\n "f"错误详情: {str(e)}")
        return jsonify({'error': str(e)}), 500
    finally:
        # Always release the database connection.
        if connection:
            connection.close()


def filter_plan_id_max_time(data_list):
    """Keep, for each campaign_id, only the record with the latest `time`.

    Args:
        data_list: list of dicts, each with at least 'campaign_id' and a
            'time' value parseable by pandas.to_datetime.

    Returns:
        list[dict]: one record per campaign_id — the row whose 'time' is
        greatest within that campaign (the 'time' field comes back as a
        pandas Timestamp).
    """
    frame = pd.DataFrame(data_list)
    # Normalize to datetime so comparisons are chronological, not lexical.
    frame['time'] = pd.to_datetime(frame['time'])
    # Row index of the newest entry within each campaign group.
    newest_per_campaign = frame.groupby('campaign_id')['time'].idxmax()
    return frame.loc[newest_per_campaign].to_dict(orient='records')


def initial_chromium(browser, seller_id):
    """Navigate the partner platform to this seller's campaign-list page.

    Searches the sub-account list for `seller_id`, hops through the
    "跳转" → "聚光平台" link, and waits for the campaign list to load.

    Args:
        browser: Chromium instance to drive.
        seller_id: sub-account id to search for.

    Returns:
        The new tab positioned on the 聚光平台 campaign list.

    Raises:
        ValueError: if the search comes back empty, or the 聚光平台 link is
            not found (the latter previously crashed with UnboundLocalError
            because `create_plan_tab` was only assigned inside the `if`).
    """
    account_tab = browser.new_tab('https://partner.xiaohongshu.com/partner/subAccount-list')
    account_tab.ele('tag:input@@class=el-input__inner@@type=text@@autocomplete=off@@placeholder=请输入').input(seller_id)
    account_tab.ele('tag:button@@id=search-btn--1').click(by_js=True)
    # Short timeout: the "no data" row only appears when the search is empty.
    search_none_ele = account_tab.ele('tag:tr@@class=d-table__empty-row@@text()=暂无数据', timeout=0.5)
    if search_none_ele:
        raise ValueError(f"合作伙伴平台子账号ID:{seller_id}-搜索结果为空, 检查表格第一行子账号ID, 或者检查是否登录合作伙伴平台")
    account_tab.ele('tag:span@@text():跳转', timeout=2).hover()
    jg_ele = account_tab.ele('tag:span@@class=d-text --color-static --color-current --size-text-paragraph d-text-nowrap d-text-ellipsis@@text():聚光平台', timeout=2)
    if not jg_ele:
        # Fail loudly with context instead of the old UnboundLocalError.
        raise ValueError(f"合作伙伴平台子账号ID:{seller_id}-未找到聚光平台跳转入口")
    create_plan_tab = jg_ele.click.for_new_tab(by_js=True)
    create_plan_tab.wait.title_change("计划列表")

    return create_plan_tab

# Script entry point: run the Flask dev server (debug off) on the default
# host/port (127.0.0.1:5000).
if __name__ == '__main__':
    app.run(debug=False)