from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

def progress_bar(current, total, barLength = 100):
    """Draw a one-line, in-place text progress bar on stdout.

    ``current``/``total`` give the completed fraction; ``barLength`` is the
    bar width in characters.  The line ends with a carriage return so the
    next call overwrites it.
    """
    percent = float(current) * 100 / total
    head = int(percent / 100 * barLength - 1)
    bar = '-' * head + '>'
    pad = ' ' * (barLength - len(bar))
    print('Progress: [%s%s] %d %%' % (bar, pad, percent), end='\r')
    sys.stdout.flush()

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """ forward pass of the model

    JIT-compiled wrapper around ``model.apply``.  ``model`` (argument 3) is
    marked static, so each distinct model object gets its own compiled
    version; re-tracing happens only when the model changes.

    Args:
        variables: parameter pytree passed straight to ``model.apply``.
        state: recurrent state fed to the model.
        x: batched observations.
        model: the network object (static under jit).

    Returns:
        Whatever ``model.apply(variables, state, x)`` returns — unpacked as
        a ``(new_state, output)`` pair at the call site in ``main``.
    """
    return model.apply(variables, state, x)

@jit
def get_action(y):
    """Greedy action selection: index of the largest logit in ``y``."""
    return jnp.argmax(y)
# batched greedy selection, vmapped over the leading (environment) axis
get_action_vmap = jax.vmap(get_action)

def load_task(pth = "./logs/task.json", display = True):
    """Load a task (landscape, start state, goal) from a JSON file.

    The file must provide the keys ``"data"``, ``"state"`` and ``"goal"``.
    When ``display`` is true, the loaded values are echoed to stdout.

    Returns:
        (landscape, state, goal) as plain Python objects from the JSON.
    """
    with open(pth, "r") as f:
        data = json.load(f)
    landscape = data["data"]
    state = data["state"]
    goal = data["goal"]
    if display:
        print("state: ", state)
        print("goal: ", goal)
        print("landscape: ", landscape)
    return landscape, state, goal

def qc_single_goal_record(record, log = False, test_steps = 1000):
    """Quality-check one goal-reached trace for a short, stable cycle.

    ``record`` is a 1-D boolean array where ``record[i]`` marks a step on
    which the goal was reached.  The trace passes when, by the last quarter
    of the run, the exponentially-weighted deviation between successive
    trial lengths has dropped to <= 1 and the trial length is at most 20
    steps.  The verdict reflects the *last* goal event in the trace.

    Returns:
        (True, deviation) when the trace passes, (False, 100000) otherwise.
    """
    var_threshold = 1
    since_goal = 0
    prev_len = 0
    deviation = 0
    passed = False
    late_start = 3 * test_steps / 4

    for idx in range(record.shape[0]):
        since_goal += 1
        if not record[idx]:
            continue
        cur_len = since_goal
        if log:
            print("trial_len: ", cur_len)
        since_goal = 0
        # EWMA of the change in trial length; small values mean the agent
        # has settled into a periodic goal-reaching cycle
        deviation = 0.9 * abs(cur_len - prev_len) + 0.1 * deviation
        prev_len = cur_len
        passed = (deviation <= var_threshold
                  and idx >= late_start
                  and cur_len <= 20)

    if passed:
        return True, deviation
    return False, 100000

def get_stable_cycle_length(record, log=False, test_steps=1000):
    """Return the dominant stable cycle length of a goal-reached trace.

    Applies the same stability criterion as ``qc_single_goal_record`` but,
    instead of a pass/fail verdict, collects every trial length that meets
    it and returns the most frequent one.

    Returns:
        The most common stable trial length, or -1 when none was found.
    """
    from collections import Counter

    var_threshold = 1
    since_goal = 0
    prev_len = 0
    deviation = 0
    stable_lengths = []
    late_start = 3 * test_steps / 4

    for idx in range(record.shape[0]):
        since_goal += 1
        if not record[idx]:
            continue
        cur_len = since_goal
        if log:
            print("trial_len: ", cur_len)
        since_goal = 0
        # EWMA of the change in trial length (see qc_single_goal_record)
        deviation = 0.9 * abs(cur_len - prev_len) + 0.1 * deviation
        prev_len = cur_len
        if deviation <= var_threshold and idx >= late_start and cur_len <= 20:
            stable_lengths.append(cur_len)

    if not stable_lengths:
        return -1
    return Counter(stable_lengths).most_common(1)[0][0]

def load_task_list(task_file):
    """Read task names from a text file, one per line; blank lines skipped."""
    with open(task_file, 'r') as f:
        return [name for name in (line.strip() for line in f) if name]

def main():
    """ Cycle Length Filter - select tasks whose stable cycle length is 9

    Loads a list of task files, replays the trained recurrent agent on all
    of them in parallel, measures each task's stable goal-reaching cycle
    length, and writes the tasks matching ``--target_cycle_length`` to the
    log directory (a task-name list plus a JSON report).
    """
    print("=" * 60)
    print("   Cycle Length Filter")
    print("   筛选稳定周期长度为9的任务")
    print("=" * 60)

    """ parse arguments
    """
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_list_file", type=str, default="./rf_pass_task.txt")
    parser.add_argument("--task_dir", type=str, default="./data/adaptive_trajectory_optimization/task_envs/")
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)
    parser.add_argument("--target_cycle_length", type=int, default=9)

    args = parser.parse_args()

    # copy the CLI overrides back into the replay configuration
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.life_duration = args.life_duration

    # NOTE(review): nn_type is assigned here but never read again in this
    # function — presumably leftover from an earlier version
    nn_type = ''
    if rpl_config.nn_type == "vanilla":
        nn_type = "vanilla"
    elif rpl_config.nn_type == "gru":
        nn_type = "gru"

    """ load task list
    """
    print(f"Loading task list from {args.task_list_file}...")
    task_list = load_task_list(args.task_list_file)
    print(f"Found {len(task_list)} tasks to process")

    """ load model
    """
    print("Loading model...")
    params = load_weights(rpl_config.model_pth)
    
    """ create agent
    """
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # check if param fits the agent
    # sanity check: the first Dense layer must expect nn_size + 10 input
    # features (the 10 is presumably the observation width — TODO confirm)
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

    # load every task file listed in the task list (batch load)
    """ 批量加载所有任务
    """
    print(f"\nProcessing {len(task_list)} tasks...")
    print(f"Target cycle length: {args.target_cycle_length}")
    print("=" * 60)
    
    landscapes, states, goals = [], [], []
    valid_task_names = []
    
    for i, task_name in enumerate(task_list):
        try:
            task_path = os.path.join(args.task_dir, task_name)
            if os.path.exists(task_path):
                landscape, state, goal = load_task(task_path, display=False)
                landscapes.append(landscape)
                states.append(state)
                goals.append(goal)
                valid_task_names.append(task_name)
            else:
                print(f"Warning: Task file not found: {task_path}")
        except Exception as e:
            print(f"Error loading task {task_name}: {e}")

    num_envs = len(landscapes)
    print(f"Successfully loaded {num_envs} valid tasks")

    states = jnp.array(states)
    goals = jnp.array(goals)

    # build one batched grid environment containing all loaded tasks
    """ create grid env - 批量并行处理
    """
    start_time = time.time()
    # NOTE(review): width/height are hard-coded to 12 here even though
    # --map_size is parsed above and never used — confirm this is intended
    GE = GridEnv(landscapes = landscapes, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=True)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    # set states of GE
    GE.batched_states = states.copy()
    # set goals of GE
    GE.batched_goals = goals.copy()
    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
    concat_obs = GE.concat_obs
    
    # roll out the agent on all tasks in parallel for life_duration steps
    """ 批量并行运行所有任务
    """
    print("\nRunning all tasks in parallel...")
    trajectories = []
    goal_record = []
    obs_record = []
    neural_states = []
    action_record = []

    rnn_state = model.initial_state(GE.num_envs)

    rkey = jax.random.PRNGKey(np.random.randint(0, 1000000))

    for t in range(rpl_config.life_duration):
        progress_bar(t, rpl_config.life_duration)

        """ model forward and step the env
        """
        rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
        batched_actions = get_action_vmap(y1)
        batched_goal_reached, concat_obs = GE.step(batched_actions, reset=True)

        trajectories.append(np.array(GE.batched_states))
        obs_record.append(np.array(concat_obs))
        action_record.append(np.array(batched_actions))
        neural_states.append(np.array(rnn_state))
        goal_record.append(batched_goal_reached)

    print("\nSimulation completed!")

    trajectories = np.array(trajectories)
    obs_record = np.array(obs_record)
    action_record = np.array(action_record)
    neural_states = np.array(neural_states)

    # move the env axis first: (time, env, ...) -> (env, time, ...)
    trajectories = np.swapaxes(trajectories, 0, 1)
    obs_record = np.swapaxes(obs_record, 0, 1)
    action_record = np.swapaxes(action_record, 0, 1)
    neural_states = np.swapaxes(neural_states, 0, 1)

    goal_record = np.array(goal_record).T

    print("shape of trajectories: ", trajectories.shape)
    print("shape of obs_record: ", obs_record.shape)
    print("shape of action_record: ", action_record.shape)
    print("shape of neural_states: ", neural_states.shape)
    print("shape of goal_record: ", goal_record.shape)

    # analyze the stable cycle length of every task's goal trace
    """ 分析每个任务的稳定周期长度 """
    print(f"\nAnalyzing stable cycle lengths...")
    
    tasks_with_target_length = []
    qc_pass_count = 0
    target_length_count = 0
    
    for i in range(goal_record.shape[0]):
        progress_bar(i, goal_record.shape[0])
        
        # check whether this task passes quality control
        qc_true, trial_len_var = qc_single_goal_record(goal_record[i], log=False, test_steps=rpl_config.life_duration)
        
        if qc_true:
            qc_pass_count += 1
            # measure the stable cycle length
            stable_length = get_stable_cycle_length(goal_record[i], log=False, test_steps=rpl_config.life_duration)
            
            if stable_length == args.target_cycle_length:
                target_length_count += 1
                tasks_with_target_length.append({
                    'task_name': valid_task_names[i],
                    'cycle_length': stable_length,
                    'trial_len_var': trial_len_var
                })
                print(f"\nFound task with cycle length {args.target_cycle_length}: {valid_task_names[i]}")
    
    print(f"\n\nAnalysis Results:")
    print(f"Total tasks processed: {num_envs}")
    print(f"Tasks passing QC: {qc_pass_count}")
    print(f"Tasks with cycle length {args.target_cycle_length}: {target_length_count}")
    # NOTE(review): this division raises ZeroDivisionError when no tasks
    # were loaded (num_envs == 0)
    print(f"QC pass ratio: {qc_pass_count / num_envs:.3f}")
    # NOTE(review): the conditional expression wraps print() itself, so the
    # else-branch string "No tasks passed QC" is evaluated but never
    # printed — likely a bug
    print(f"Target cycle ratio: {target_length_count / qc_pass_count:.3f}" if qc_pass_count > 0 else "No tasks passed QC")

    # save the results into the log directory
    """ 保存结果到logs目录 """
    import datetime
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    
    # save the list of matching task names (one per line)
    result_file = os.path.join(rpl_config.log_pth, f"tasks_cycle_length_{args.target_cycle_length}_{timestamp}.txt")
    with open(result_file, 'w') as f:
        for task_info in tasks_with_target_length:
            f.write(f"{task_info['task_name']}\n")
    
    # save the detailed report as JSON
    detail_file = os.path.join(rpl_config.log_pth, f"tasks_cycle_length_{args.target_cycle_length}_details_{timestamp}.json")
    result_data = {
        'timestamp': timestamp,
        'total_tasks': num_envs,
        'qc_pass_count': qc_pass_count,
        'target_cycle_length': args.target_cycle_length,
        'target_length_count': target_length_count,
        'qc_pass_ratio': qc_pass_count / num_envs,
        'target_cycle_ratio': target_length_count / qc_pass_count if qc_pass_count > 0 else 0,
        'tasks_with_target_length': tasks_with_target_length
    }
    
    with open(detail_file, 'w') as f:
        json.dump(result_data, f, indent=2)
    
    print(f"\nResults saved:")
    print(f"Task list: {result_file}")
    print(f"Details: {detail_file}")
    
    if target_length_count > 0:
        print(f"\nFound {target_length_count} tasks with stable cycle length {args.target_cycle_length}:")
        for task_info in tasks_with_target_length:
            print(f"  - {task_info['task_name']} (variance: {task_info['trial_len_var']:.6f})")

# script entry point
if __name__ == "__main__":
    main()
