import traceback
from typing import Dict

import gym
import numpy as np
import ray
from datasets import load_dataset

from minisweagent.run.extra.swebench import DATASET_MAPPING, filter_instances
from agentic_system.environments.env_package.swe.swe.swe_bench_agent_env import SWEBenchShellEnv
from agentic_system.environments.env_package.swe.data_processor.arm64_instance import ARM64_INSTANCES
# -----------------------------------------------------------------------------
# Ray remote worker actor -----------------------------------------------------
# -----------------------------------------------------------------------------

@ray.remote(num_cpus=0.25)
class SWEBenchWorker:
    """Ray remote actor hosting a single ``SWEBenchShellEnv`` instance.

    One actor is created per (environment, group) slot so episodes can be
    stepped in parallel from the driver process.
    """

    def __init__(self, seed, max_steps):
        # NOTE(review): ``seed`` is currently unused — the underlying env is
        # constructed without it.  Kept in the signature for compatibility
        # with the driver, which passes a per-group seed.
        self.max_step = max_steps
        self.env = SWEBenchShellEnv()

    def step(self, action, valid):
        """Execute one step in the environment.

        Returns the usual ``(obs, reward, done, info)`` tuple.  ``info`` is
        augmented with:
          * ``task_score`` — the raw environment reward for this step.
          * ``won``        — True only when the episode is done and the raw
            reward reached at least 1.0 (rule-based win/lose signal).
        """
        obs, reward, done, info = self.env.step(action, valid)

        info['task_score'] = reward
        # Rule-based success flag: a finished episode with reward >= 1.0
        # counts as a win; anything else (including unfinished steps) does not.
        info['won'] = bool(done and reward >= 1.0)

        return obs, reward, done, info

    def reset(self, instance):
        """Reset the wrapped environment to the given SWE-bench instance."""
        obs, info = self.env.reset(instance)
        return obs, info

    def close(self):
        """Close the wrapped environment and release its resources."""
        self.env.close()


# -----------------------------------------------------------------------------
# Vectorised Ray environment --------------------------------------------------
# -----------------------------------------------------------------------------

class SWEBenchMultiProcessEnv():
    """A vectorised, Ray-based wrapper around SWE-bench shell environments.

    Spawns ``env_num * group_n`` :class:`SWEBenchWorker` actors, each hosting
    one environment.  :py:meth:`step` and :py:meth:`reset` fan commands out to
    every actor and gather the results, so downstream RL code sees plain
    per-environment lists.
    """

    def __init__(
        self,
        seed: int = 0,
        env_num: int = 1,
        group_n: int = 1,
        is_train: bool = True,
        env_kwargs: dict = None,
    ) -> None:
        """Load the dataset, split train/eval, and spawn the worker actors.

        Args:
            seed: base RNG seed; each *group* of workers gets ``seed + group``.
            env_num: number of distinct environments (tasks) per batch.
            group_n: number of workers sharing each task.
            is_train: selects the 80% (train) or 20% (eval) dataset slice.
            env_kwargs: optional dict with ``subset``, ``split``,
                ``filter_spec``, ``slice_spec`` and ``max_steps`` keys.
        """
        if env_kwargs is None:
            env_kwargs = {}
        super().__init__()

        # Initialize Ray if not already initialized.
        if not ray.is_initialized():
            ray.init(local_mode=True)

        subset = env_kwargs.get("subset", "verified")
        split = env_kwargs.get("split", "test")
        filter_spec = env_kwargs.get("filter_spec", "")
        slice_spec = env_kwargs.get("slice_spec", "")
        dataset_path = DATASET_MAPPING.get(subset, subset)

        instances = list(load_dataset(dataset_path, split=split))
        # Keep only instances known to run on ARM64 hosts.
        instances = [inst for inst in instances if inst["instance_id"] in ARM64_INSTANCES]
        print(f" instance length : {len(instances)}")

        self.instances = filter_instances(
            instances, filter_spec=filter_spec, slice_spec=slice_spec, shuffle=False
        )

        self.group_n = group_n
        self.env_num = env_num
        self.num_processes = env_num * group_n
        self.is_train = is_train
        self.start_idx = 0

        # 80/20 train/eval split.  BUGFIX: split on the *filtered* list
        # (``self.instances``), not the pre-filter ``instances`` — the two
        # lengths differ whenever filter_spec/slice_spec prune anything.
        split_point = int(len(self.instances) * 0.8)
        if self.is_train:
            self.instances = self.instances[:split_point]
        else:
            self.instances = self.instances[split_point:]
            # Pad the eval set to at least 48 entries by repeating the last
            # instance so a fixed-size eval batch can always be formed.
            while len(self.instances) < 48:
                self.instances.append(self.instances[-1])
        self.data_len = len(self.instances)

        self._rng = np.random.RandomState(seed)
        self._env_kwargs = env_kwargs

        # -------------------------- Ray actors setup --------------------------
        # One actor per (env, group) slot; all members of a group share the
        # same seed so grouped rollouts are comparable.
        max_steps = int(self._env_kwargs["max_steps"]) if "max_steps" in self._env_kwargs else 6
        self._workers = [
            SWEBenchWorker.remote(seed + (i // self.group_n), max_steps)
            for i in range(self.num_processes)
        ]
        self._closed = False

    def step(self, actions: list[str], valids: list[int], buffers):
        """Step every worker once.

        Args:
            actions: one action string per worker (length ``num_processes``).
            valids: one validity flag per worker.
            buffers: per-worker payloads; a falsy value is replaced by blanks.
                Currently unused by the workers, but kept in the zip so a
                short ``buffers`` list truncates dispatch exactly as before.

        Returns:
            ``(obs_list, reward_list, done_list, info_list)`` on success, or
            ``None`` when gathering the Ray futures failed (diagnostics are
            printed; callers must handle the ``None``).
        """
        if not buffers:
            buffers = [" "] * len(actions)
        if len(actions) != self.num_processes:
            raise ValueError(
                f'Expected {self.num_processes} actions, got {len(actions)}',
            )

        # Fan the step out to all workers.
        futures = []
        for worker, action, valid, buffer in zip(self._workers, actions, valids, buffers):
            futures.append(worker.step.remote(action, valid))

        # Gather results; on failure print diagnostics and signal with None.
        try:
            results = ray.get(futures)
        except Exception:
            print(f"#### actions {actions} len {len(actions)}")
            print(f"#### valids {valids} len {len(valids)}")
            print(f"#### workers len {len(self._workers)}")
            print(traceback.format_exc())
            return None

        obs_list, reward_list, done_list, info_list = [], [], [], []
        for obs, reward, done, info in results:
            obs_list.append(obs)
            reward_list.append(reward)
            done_list.append(done)
            info_list.append(info)

        return obs_list, reward_list, done_list, info_list

    def reset(self):
        """Reset every worker to a freshly sampled SWE-bench instance.

        Samples ``env_num`` distinct indices (``replace=False`` requires
        ``env_num <= data_len``) and repeats each one ``group_n`` times so
        every member of a group works on the same task.

        Returns:
            ``(obs_list, info_list)`` with one entry per worker.
        """
        # TODO: restore to normal values.
        idx = self._rng.choice(self.data_len, size=self.env_num, replace=False)
        idx = idx + self.start_idx
        idx = np.repeat(idx, self.group_n).tolist()

        futures = []
        for worker, i in zip(self._workers, idx):
            futures.append(worker.reset.remote(self.instances[i]))

        results = ray.get(futures)
        obs_list, info_list = [], []
        for obs, info in results:
            obs_list.append(obs)
            info_list.append(info)

        return obs_list, info_list

    # ------------------------------------------------------------------
    # Convenience helpers ----------------------------------------------
    # ------------------------------------------------------------------

    def render(self, mode: str = 'text', env_idx: int = None):
        """Render one worker (``env_idx``) or all workers.

        NOTE(review): ``SWEBenchWorker`` defines no ``render`` method, so this
        call raises unless a ``render`` is added to the actor — confirm
        whether this helper is still used before relying on it.
        """
        if env_idx is not None:
            return ray.get(self._workers[env_idx].render.remote(mode))

        futures = [worker.render.remote(mode) for worker in self._workers]
        return ray.get(futures)

    # ------------------------------------------------------------------
    # Clean‑up ----------------------------------------------------------
    # ------------------------------------------------------------------

    def close(self):
        """Close all environments and kill the Ray actors (idempotent).

        BUGFIX: re-enabled the double-close guard — ``__del__`` also calls
        this, and invoking remote methods on already-killed actors raises.
        """
        if getattr(self, '_closed', False):
            return

        # ``_workers`` may be missing if __init__ failed before actor setup.
        workers = getattr(self, '_workers', [])

        # Ask every worker to close its environment, then wait.
        close_futures = [worker.close.remote() for worker in workers]
        ray.get(close_futures)

        # Kill all Ray actors.
        for worker in workers:
            ray.kill(worker)

        self._closed = True

    def __del__(self):  # noqa: D401
        # Destructors can run at interpreter shutdown when Ray may already be
        # torn down; never let the error propagate out of __del__.
        try:
            self.close()
        except Exception:
            pass


# -----------------------------------------------------------------------------
# Factory helper --------------------------------------------------------------
# -----------------------------------------------------------------------------

def build_swe_bench_envs(
    seed: int = 0,
    env_num: int = 1,
    group_n: int = 1,
    is_train: bool = True,
    env_kwargs: dict = None,
):
    """Mirror *build_sokoban_envs* so higher‑level code can swap seamlessly.

    Thin factory: every argument is forwarded unchanged to
    :class:`SWEBenchMultiProcessEnv`.
    """
    vec_env = SWEBenchMultiProcessEnv(
        seed=seed,
        env_num=env_num,
        group_n=group_n,
        is_train=is_train,
        env_kwargs=env_kwargs,
    )
    return vec_env