import sys
import csv

import numpy as np
import lib.tools as my_tools

from tqdm import tqdm
from qpsolvers import solve_qp
from lib.model import system_model
from itertools import product
from loguru import logger

# For multithreaded processing; starting with Python 3.14, threads can run
# without the GIL, enabling true parallel computation.
import threading
import queue

interception_system = system_model("interception_system")
logger.add("solve.log")

def solve_dataset_n3(yaml_name):
    """Solve the attitude-control QP over the full (omega_z, n3, n3d) grid.

    Loads the weights ``eta``, ``w_u`` and ``w_omega`` from *yaml_name*,
    enumerates every combination of the state grids, solves each one via
    :func:`process_single_state` on a thread pool, and stores the resulting
    12-column rows in a float32 memmap (``sorted_dataset_attitude.npy``).
    A CSV file (``solved_dataset_attitude.csv``) is created containing only
    the header row; per-row CSV output is intentionally disabled.

    Parameters
    ----------
    yaml_name : str
        Path to the YAML configuration file.
    """
    # Load parameters
    config = my_tools.load_yaml(yaml_name)
    logger.info(f"Params {config} have been loaded.")
    eta = config['eta']
    w_u = config['w_u']
    w_omega = config['w_omega']

    # State grids. NOTE: round() instead of int() — int() truncates the
    # float-division result (0.8 - (-0.8)) / 0.1 == 15.999... down to 15,
    # which produced 16 points with ~0.107 spacing instead of the intended
    # 17 points with exact 0.1 spacing.
    omega_z = np.linspace(-0.8, 0.8, num=round((0.8 - (-0.8)) / 0.1) + 1, dtype=np.float64)
    gen_n3d = interception_system.generate_n3(bound=0.2)
    n3d = list(gen_n3d)
    gen_n3 = interception_system.generate_n3(bound=0.2)
    n3 = list(gen_n3)

    data = [
        ["omega_z", len(omega_z)],
        ["n3", len(n3)],
        ["n3d", len(n3d)],
        ["all combinations", len(omega_z) * len(n3) * len(n3d)]
    ]
    my_tools.print_table(data, "State seed lengths")

    # Progress bar over the cartesian product of state-grid indices.
    state_pdt = product(range(len(omega_z)), range(len(n3)), range(len(n3d)))
    state_len = len(omega_z) * len(n3) * len(n3d)
    state_tqdm = tqdm(state_pdt, total=state_len, desc="Adding combinations", ncols=110)

    # Memory-mapped output: one 12-column float32 row per state combination.
    datasetfilename = 'sorted_dataset_attitude.npy'
    fp = np.memmap(datasetfilename, dtype='float32', mode='w+', shape=(state_len, 12))

    # Thread pool plus batched submission keeps peak memory bounded.
    import concurrent.futures
    from functools import partial

    with open('solved_dataset_attitude.csv', 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        # Header only; results are persisted to the memmap, not the CSV.
        writer.writerow(['D_value', 'omega_x', 'omega_y', 'omega_z', 'n3_x', 'n3_y', 'n3_z', 'n3_xd', 'n3_yd', 'n3_zd', 'desired_D_value','opt_error'])

        # Lazily yield concrete state tuples so the full product is never
        # materialized in memory at once.
        def generate_state_combinations():
            for s1, s2, s3 in state_tqdm:
                yield (omega_z[s1], n3[s2], n3d[s3])

        thread_nums = min(16, state_len)  # cap the worker count

        # Bind the constant weights once; workers receive only the state.
        worker_func = partial(process_single_state, w_omega=w_omega, w_u=w_u, eta=eta)

        with concurrent.futures.ThreadPoolExecutor(max_workers=thread_nums) as executor:
            batch_size = 1000000  # states processed per batch
            total_batches = (state_len + batch_size - 1) // batch_size

            state_generator = generate_state_combinations()

            for batch_num in range(0, state_len, batch_size):
                # Pull up to batch_size states for this batch.
                batch_states = []
                for _ in range(min(batch_size, state_len - batch_num)):
                    try:
                        batch_states.append(next(state_generator))
                    except StopIteration:
                        break

                if not batch_states:
                    continue
                current_batch = batch_num // batch_size + 1

                # Submit the whole batch to the pool.
                future_to_state = {}
                with tqdm(total=len(batch_states), desc=f"Submitting batch {current_batch}/{total_batches}", ncols=110) as pbar:
                    for state in batch_states:
                        future = executor.submit(worker_func, state)
                        future_to_state[future] = state
                        pbar.update(1)

                # Collect results as they finish. NOTE(review): as_completed
                # yields in completion order, so rows within a batch are not
                # in submission order; each row embeds its own state, so
                # consumers must not rely on row ordering. Failed states skip
                # a row index, leaving trailing zero rows in the batch span.
                future_index = 0
                with tqdm(total=len(batch_states), desc=f"Processing batch {current_batch}/{total_batches}", ncols=110) as pbar:
                    for future in concurrent.futures.as_completed(future_to_state):
                        try:
                            one_result = future.result()
                            fp[(current_batch - 1) * batch_size + future_index, :] = one_result.astype(np.float32)
                            future_index += 1
                        except Exception as e:
                            logger.error(f"Error processing state: {e}")
                        pbar.update(1)
                    fp.flush()

def process_single_state(state, w_omega, w_u, eta):
    """Solve the attitude QP for a single (omega_z, n3, n3d) state.

    Parameters
    ----------
    state : tuple
        ``(omega_z, n3, n3d)`` where ``n3``/``n3d`` are length-3 vectors
        (current and desired body-axis directions).
    w_omega : float
        Weight on the tracking term of the QP objective.
    w_u : float
        Weight on the control-effort (regularization) term.
    eta : float
        Convergence-rate gain used in the desired decrease value.

    Returns
    -------
    numpy.ndarray
        Length-12 float64 row:
        ``[D_value, omega_x, omega_y, omega_z, n3_x, n3_y, n3_z,
        n3_xd, n3_yd, n3_zd, desired_D_value, opt_error]``.

    Raises
    ------
    RuntimeError
        If the QP solver reports the problem infeasible.
    """
    omega_z, n3, n3d = state
    if np.array_equal(n3, n3d):
        # Already at the desired direction: zero command, zero error.
        return np.array([0, 0, 0, omega_z, n3[0], n3[1], n3[2], n3d[0], n3d[1], n3d[2], 0, 0], dtype=np.float64)
    else:
        # Direction-error components.
        tilde_n3x = n3[0] - n3d[0]
        tilde_n3y = n3[1] - n3d[1]
        tilde_n3z = n3[2] - n3d[2]
        # QP data: minimize 0.5*w' P w + q' w over the box [-2, 2]^2.
        N =  np.array([[tilde_n3y*n3[2] - tilde_n3z*n3[1], tilde_n3z*n3[0] - tilde_n3x*n3[2]]])
        P = N.T @ (w_omega*N) + w_u*np.eye(2)
        n_c = -(tilde_n3x*n3[1]*omega_z - tilde_n3y*n3[0]*omega_z + eta*(tilde_n3x**2 + tilde_n3y**2))
        q = -n_c*w_omega*N.T
        lb = np.array([-2, -2])
        ub = np.array([2, 2])
        omega2 = solve_qp(P, q, lb=lb, ub=ub, solver="quadprog")
        if omega2 is None:
            # solve_qp returns None when no solution is found; fail loudly
            # instead of raising an opaque TypeError on the subscripts below.
            raise RuntimeError(f"QP solver found no solution for state {state}")

        # Auxiliary diagnostics for the dataset row.
        D_value = interception_system.cal_D_value_n3(n_3 = n3, 
                                                    n_3d = n3d, 
                                                    omega = np.array([omega2[0], omega2[1], omega_z])
                                                    )
        # Residual of the linearized constraint under the optimal command.
        error = N @ omega2.reshape(2,1) - n_c
        desired_D_value = -eta*(tilde_n3x**2 + tilde_n3y**2)
        
        # Assemble the 12-element dataset row.
        item_np = np.array([D_value[0,0], omega2[0], omega2[1], omega_z, n3[0], n3[1], n3[2], n3d[0], n3d[1], n3d[2], desired_D_value, error.item()], dtype=np.float64)
        return item_np

# CLI sub-command name -> human-readable description, rendered by
# my_tools.print_help when usage is requested or arguments are invalid.
COMMANDS = {
    "attitude": "Sort attitude control dataset"
}

if __name__ == "__main__":
    # CLI entry point: <script> <command> <yaml-file>
    argc = len(sys.argv)

    if argc == 1:
        # No sub-command given: show usage.
        my_tools.print_help(sys.argv[0], COMMANDS)

    if argc == 2:
        # Sub-command present but the YAML parameter file is missing.
        logger.error("YAML file name miss.")

    if argc == 3:
        command, yaml_name = sys.argv[1], sys.argv[2]
        if command == 'attitude':
            solve_dataset_n3(yaml_name)
        else:
            # Unknown sub-command: show usage.
            my_tools.print_help(sys.argv[0], COMMANDS)