import gym
import numpy as np
from gym import spaces
import torch
import random
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from collections import defaultdict


class ParallelUpgradeEnv(gym.Env):
    """Gym environment for scheduling parallel component upgrades.

    The agent chooses from a precomputed list of upgrade moves
    ``(component, from_version, to_version)`` plus one no-op action.
    Launching an upgrade occupies one of ``max_parallel`` slots for a
    duration drawn from ``upgrade_time``.  Wall-clock time only advances on
    the no-op action, so several upgrades can be launched within the same
    time step until slots or the remaining window run out.
    """

    def __init__(self,
                 num_components: int,
                 time_windows: list[int],
                 dependency_matrix: np.ndarray,
                 jump_matrix: np.ndarray,
                 max_parallel: int,
                 device: torch.device):
        """
        Args:
            num_components: number of upgradable components N.
            time_windows: duration of each maintenance window, in time steps.
            dependency_matrix: (N, N, V) int array. Entry [i, j, v] is -2 if
                version v of component i does not exist, -1 if it has no
                dependency on component j, or k >= 0 if it requires component
                j to be at version >= k.
            jump_matrix: (N, V, V) 0/1 array; 1 allows a direct jump v1 -> v2
                (consecutive upgrades v2 == v1 + 1 are always allowed).
            max_parallel: number of upgrades that may run concurrently.
            device: torch device for all internal tensors.
        """
        super().__init__()
        self.device = device

        # Basic parameters.
        self.num_components  = num_components
        self.time_windows    = time_windows
        self.current_window  = 0
        self.current_time    = 0
        self.remaining_time  = time_windows[0]
        self.max_time_window = max(time_windows)
        self.max_parallel    = max_parallel
        # Per-component version count, read off the matrix's version axis.
        self.max_versions = [len(dependency_matrix[i, 0]) for i in range(num_components)]
        self.gantt_chart = UpgradeGanttChart(max_parallel, time_windows)
        self.feats_dim = 5 + 2 + num_components

        # Episode statistics.
        self.skip_count = 0                  # number of no-op actions taken this episode
        self.total_time = 0
        self.action_count = 0
        self.completion_times = {}           # component -> time step it first reached its top version
        self.parallel_usage_history = []     # occupied parallel slots per time step
        self.parallel_util_timeline = []     # slot utilisation (0..1) per time step
        self.current_completion_times = {comp: None for comp in range(self.num_components)}
        # Per-component completion times accumulated across episodes.
        # NOTE(review): never populated anywhere in this file — TODO.
        self.episode_completion_times = {comp: [] for comp in range(self.num_components)}

        # Move the numpy data onto the target device.
        # dependency_matrix: shape (N, N, V), dtype int (-2, -1, >=0).
        # FIX: pass the actual version count; the original code accidentally
        # referenced the module-level ``max_versions`` global here.
        dependency_matrix = up_fill_dependency_matrix(
            num_components, dependency_matrix.shape[2], dependency_matrix)
        self.dependency_matrix = torch.tensor(
            dependency_matrix, device=device, dtype=torch.long
        )
        # jump_matrix: shape (N, V, V), values 0/1.
        self.jump_matrix = torch.tensor(
            jump_matrix, device=device, dtype=torch.bool
        )

        # Upgrade duration matrix upgrade_time[c, v1, v2], random in [3, 6].
        ut = np.random.randint(3, 7,
                               size=(num_components,
                                     dependency_matrix.shape[2],
                                     dependency_matrix.shape[2]))
        self.upgrade_time = torch.tensor(
            ut, device=device, dtype=torch.long
        )
        self.max_upgrade_time = int(self.upgrade_time.max().item())

        # Static list of candidate actions (comp, v1, v2) plus a trailing no-op.
        self.valid_actions = self._precompute_valid_actions()
        self.noop_index   = len(self.valid_actions)
        self.valid_actions.append((0, 0, 0))  # noop

        # Current version of every component.
        self.state = torch.zeros(num_components,
                                 dtype=torch.int,
                                 device=device)
        # Remaining duration of an in-flight upgrade (0 = idle).
        self.upgrading = torch.zeros(num_components,
                                     dtype=torch.int,
                                     device=device)
        # Versions captured when an upgrade starts, applied on completion.
        self.prev_version   = torch.zeros(num_components,
                                          dtype=torch.int,
                                          device=device)
        self.target_version = torch.zeros(num_components,
                                          dtype=torch.int,
                                          device=device)
        # Components currently occupying a parallel slot.
        self.active_upgrades = set()

        N = self.num_components
        low  = np.zeros((N+2,), dtype=np.float32)
        high = np.ones ((N+2,), dtype=np.float32)
        self.observation_space = spaces.Box(low=low, high=high, dtype=np.float32)
        self.action_space = spaces.Discrete(len(self.valid_actions))


    def get_current_valid_action_indices(self) -> list[int]:
        """Return the indices (into ``valid_actions``) legal in the current state.

        The no-op index is always appended, so the result is never empty.
        """
        valid_indices = []
        for idx, (comp, v1, v2) in enumerate(self.valid_actions):
            if self.state[comp] == v2:  # already at the target version
                continue
            if self.state[comp] != v1:  # current version does not match the move
                continue
            if not self.is_dependency_satisfied(comp, v2):  # dependencies unmet
                continue
            # The upgrade must fit within the remaining window time.
            upgrade_time = self.upgrade_time[comp, v1, v2]
            if upgrade_time > self.remaining_time:
                continue

            # A free parallel slot must be available.
            if len(self.active_upgrades) >= self.max_parallel:
                continue

            # The component must not already be upgrading.
            if self.upgrading[comp] != 0:
                continue

            valid_indices.append(idx)
        valid_indices.append(self.noop_index)  # allow skipping this time step
        return valid_indices


    def _precompute_valid_actions(self):
        """Enumerate all statically legal upgrade moves (comp, v1, v2).

        A move is kept when the target version exists (D[i, i, v2] != -2)
        and is reachable either as the next version (v2 == v1 + 1) or via
        an allowed jump (jump_matrix[comp, v1, v2] == 1).
        """
        valid_actions = []
        for comp in range(self.num_components):
            for v1 in range(self.max_versions[comp]):
                for v2 in range(self.max_versions[comp]):
                    if v1 < v2:
                        # Target version must exist.
                        if self.dependency_matrix[comp, comp, v2] == -2:
                            continue

                        # Must be a consecutive step or an allowed jump.
                        if not (self.jump_matrix[comp, v1, v2] == 1 or v2 == v1 + 1):
                            continue

                        valid_actions.append((comp, v1, v2))

        return valid_actions

    def get_state_action_features(self):
        """Return (valid action indices, feature tensor (K, S+5)) on device.

        Per-action features: [t_cost, prog, span, t_rem, parallel_util];
        each row is the observation concatenated with the action features.
        The no-op row keeps all-zero action features.
        """
        valid_idxs = self.get_current_valid_action_indices()
        state = self.get_observation()
        K = len(valid_idxs)
        action_feats = torch.zeros((K, 5), device=self.device, dtype=torch.float32)

        t_rem   = self.remaining_time / self.max_time_window
        util    = len(self.active_upgrades) / self.max_parallel

        for i, ai in enumerate(valid_idxs):
            if ai == self.noop_index:
                continue
            c, v1, v2 = self.valid_actions[ai]
            # FIX: use .item() so the feature list is all Python floats
            # (building torch.tensor from a 0-dim tensor mixed with floats
            # is deprecated and forces a device sync).
            t_cost = self.upgrade_time[c, v1, v2].item() / self.max_time_window
            prog   = v1 / (self.dependency_matrix.shape[2] - 1)
            span   = (v2 - v1) / (self.dependency_matrix.shape[2] - 1)
            action_feats[i] = torch.tensor([t_cost, prog, span, t_rem, util],
                                    device=self.device)

        S = state.shape[0]

        # Broadcast the observation to every action row: (1, S) -> (K, S).
        state_rep = state.unsqueeze(0).expand(K, S)

        # Concatenate -> (K, S + 5).
        combined_feats = torch.cat([state_rep, action_feats], dim=1)

        return valid_idxs, combined_feats

    def is_dependency_satisfied(self, component, target_version):
        """Check every cross-component dependency of the target version.

        Fails when a required component is below the required version or its
        current version is marked invalid (-2) in the dependency matrix.
        """
        for j in range(self.num_components):
            required_version = self.dependency_matrix[component, j, target_version]
            if required_version >= 0 and (self.state[j] < required_version or self.dependency_matrix[j, j, self.state[j]] == -2):
                return False
        return True

    def step(self, action_index: int, is_need_visualization=False):
        """Apply one action and advance the environment.

        No-op: time advances one step, in-flight upgrades progress, finished
        upgrades commit their target version.  Upgrade action: the upgrade
        is launched (slot occupied) without advancing time.

        Returns (obs, reward, done, info); ``info['makespan']`` is set on
        termination.  NOTE(review): reward is a tensor only on the untouched
        default path and a Python float otherwise, as in the original code.
        """
        self.last_action_index = action_index
        reward = torch.tensor(0.0, device=self.device)
        done   = False
        info = {}
        self.action_count += 1

        # ---- no-op: the only action that advances wall-clock time ----
        if action_index == self.noop_index:
            # Small bonus for skipping when time is nearly out or slots are
            # nearly full (i.e. when skipping is the sensible choice).
            reward = 0.5 if (self.remaining_time/self.max_time_window)<=0.1 or \
                           len(self.active_upgrades)/self.max_parallel>=0.9 else 0.0
            # Progress all in-flight upgrades by one step.
            self.upgrading = torch.clamp(self.upgrading - 1, min=0)
            # Record slot usage for this time step.
            current_active = len(self.active_upgrades)
            self.parallel_usage_history.append(current_active)
            self.parallel_util_timeline.append(current_active / self.max_parallel)

            # Commit finished upgrades: free the slot and apply the version.
            finished = (self.upgrading==0) & torch.tensor(
                [c in self.active_upgrades for c in range(self.num_components)],
                device=self.device)
            for c in finished.nonzero(as_tuple=False).view(-1).tolist():
                self.state[c] = int(self.target_version[c])
                self.active_upgrades.discard(c)
                # Record the first time a component reaches its top version.
                if self.state[c] == self.max_versions[c] - 1:
                    if c not in self.completion_times:
                        # current_time has not been incremented yet this step.
                        self.completion_times[c] = self.current_time

            # Advance time.
            self.remaining_time -= 1
            self.current_time  += 1
            # Count the skip.
            self.skip_count += 1


        else:
            # ---- launch a parallel upgrade ----
            c, v1, v2 = self.valid_actions[action_index]
            self.prev_version[c]   = int(v1)
            self.target_version[c] = int(v2)
            self.upgrading[c]      = int(self.upgrade_time[c, v1, v2].item())
            self.active_upgrades.add(c)

            # Reward high slot utilisation.
            util = len(self.active_upgrades)/self.max_parallel
            reward = 1.5 if util>=0.9 else 1.0
            if is_need_visualization:
                self.gantt_chart.record_upgrade(c, v1, v2, self.current_time, self.upgrading[c])



        # ---- maintenance-window rollover ----
        if self.remaining_time <= 0:
            self.current_window += 1
            if self.current_window < len(self.time_windows):
                self.remaining_time = self.time_windows[self.current_window]
            else:
                # Ran out of windows: failure.
                done   = True
                reward = -100.0
                info['makespan'] = self.current_time
                self.total_time = self.current_time

        # ---- success: every component at the top version ----
        if torch.all(self.state == (torch.tensor(self.dependency_matrix.shape[2]-1,
                                                 device=self.device))):
            done   = True
            reward = 100.0
            info['makespan'] = self.current_time
            self.total_time = self.current_time

        obs = self.get_observation()

        return obs, reward, done, info



    def get_observation(self):
        """Return a torch.Tensor of shape (N+2,):
        [ t_rem_norm, active_ratio, prog_0, ..., prog_{N-1} ].
        """
        # 1) Normalised remaining window time.
        t_rem_norm = torch.tensor(
            [self.remaining_time / self.max_time_window],
            device=self.device,
            dtype=torch.float32
        )

        # 2) Parallel slot utilisation.
        active_ratio = torch.tensor(
            [len(self.active_upgrades) / self.max_parallel],
            device=self.device,
            dtype=torch.float32
        )

        # 3) Per-component progress prog_i = current_version / (max_version_i - 1).
        max_vs = torch.tensor(self.max_versions,
                              dtype=torch.float32,
                              device=self.device) - 1.0
        # Guard against division by zero for single-version components.
        max_vs = torch.where(max_vs <= 0, torch.ones_like(max_vs), max_vs)
        prog = self.state.float() / max_vs  # shape (N,)

        # Concatenate into (N+2,).
        obs = torch.cat([t_rem_norm, active_ratio, prog], dim=0)
        return obs


    def reset(self):
        """Reset the environment to the start of a fresh episode and return
        the initial observation."""
        self.current_window     = 0
        self.current_time       = 0
        self.remaining_time     = self.time_windows[0]
        self.state.fill_(0)
        self.upgrading.fill_(0)
        # FIX: the original reset left stale upgrade bookkeeping behind.
        self.prev_version.fill_(0)
        self.target_version.fill_(0)
        self.active_upgrades.clear()
        self.gantt_chart.reset()

        # Reset episode statistics.
        self.skip_count = 0
        self.completion_times.clear()
        self.parallel_usage_history.clear()
        self.parallel_util_timeline.clear()
        # Clear this episode's per-component completion times.
        self.current_completion_times = {comp: None for comp in range(self.num_components)}
        self.action_count = 0
        self.total_time = 0

        return self.get_observation()

    def render(self, done):
        """Print a one-line summary of the current state.

        Requires at least one prior step() call (reads last_action_index).
        """
        print(f"State: {self.state}, Remaining Time: {self.remaining_time},\
               Remaining Parallel: {self.max_parallel - len(self.active_upgrades)},Current Window: {self.current_window}, \
               Last Action: {self.valid_actions[self.last_action_index]}")
        # if done:
        #     self.gantt_chart.generate_gantt_chart()


class UpgradeGanttChart:
    """Collects upgrade records and renders them as a Gantt chart with a
    first-fit slot-packing layout and an utilisation heatmap background."""

    def __init__(self, max_parallel, time_windows, max_versions: int = 10):
        """
        Args:
            max_parallel: number of parallel slots (chart height).
            time_windows: per-window durations, used to draw boundary lines.
            max_versions: number of colours sampled from 'tab20'.
                FIX: previously read from an undeclared module-level global;
                the default preserves the old global's value.
        """
        self.time_slot_map = defaultdict(list)  # {time step: [(y_start, y_end), ...]}
        # FIX: plt.cm.get_cmap was removed in matplotlib 3.9; use plt.get_cmap.
        self.color_map = plt.get_cmap('tab20', max_versions)
        self.upgrade_records = []
        self.max_parallel = max_parallel
        self.time_windows = time_windows
        self.window_boundaries = self.calculate_window_boundaries()

    def reset(self):
        """Clear all records in preparation for the next visualised episode."""
        # Clear the per-time-step slot occupancy map.
        self.time_slot_map.clear()
        # Clear all recorded upgrade operations.
        self.upgrade_records.clear()

    def calculate_window_boundaries(self):
        """Return cumulative window boundaries, starting at 0."""
        boundaries = [0]
        for t in self.time_windows:
            boundaries.append(boundaries[-1] + t)
        return boundaries

    def record_upgrade(self, component, from_version, to_version, start_time, duration):
        """Append one upgrade record, coercing times to integer steps."""
        start = int(start_time)
        end = int(start_time + duration)
        self.upgrade_records.append((component, from_version, to_version, start, end))

    def find_optimal_position(self, start, end, height):
        """First-fit packing: lowest y position free over [start, end).

        Returns the base y coordinate, or None if no slot fits.
        """
        # Heights must be integral slot counts.
        height = int(height)

        # Scan candidate base positions from the bottom up.
        for base_y in range(0, self.max_parallel - height + 1):
            # The candidate band must be free at every covered time step.
            available = True
            for t in range(start, end):
                # Unrecorded time steps are entirely free.
                if t not in self.time_slot_map:
                    continue

                # Check overlap against every occupied band at this step.
                for (y1, y2) in self.time_slot_map[t]:
                    if not (base_y + height <= y1 or base_y >= y2):
                        available = False
                        break
                if not available:
                    break
            if available:
                return base_y
        return None  # no free position

    def allocate_position(self, start, end, height):
        """Find and claim a y position for [start, end); None on failure."""
        # Reject impossible heights up front.
        if height <=0 or height > self.max_parallel:
            print(f"无效高度：{height}")
            return None

        position = self.find_optimal_position(start, end, height)
        if position is None:
            print(f"无法在{start}-{end}时间段分配{height}并行资源")
            return None

        # Mark the band occupied at every covered time step.
        for t in range(start, end):
            self.time_slot_map[t].append( (position, position + height) )
            # Keep bands sorted by y_start for later queries.
            self.time_slot_map[t].sort(key=lambda x: x[0])
        return position

    def generate_gantt_chart(self):
        """Lay out all recorded upgrades and show the chart."""
        fig, ax = plt.subplots(figsize=(15, 8))

        # Place tasks in start-time order so packing is deterministic.
        sorted_records = sorted(self.upgrade_records, key=lambda x: x[3])

        for comp, v1, v2, start, end in sorted_records:
            # Claim a slot row (integer y); skip tasks that cannot be placed.
            y_pos = self.allocate_position(start, end, 1)
            if y_pos is None:
                continue

            # Draw the task bar at integer coordinates.
            duration = end - start
            rect = ax.barh(
                y=y_pos,
                width=duration,
                left=start,
                height=1,
                color=self.color_map(comp),  # colour by component
                edgecolor='black',
                alpha=0.8,
                align='edge'  # align to integer coordinates
            )

            # Centred text label on the bar.
            label_x = start + duration/2
            label_y = y_pos + 1/2
            ax.text(
                label_x, label_y,
                f"C{comp}\n{v1}→{v2}",
                ha='center', va='center',
                color='white', fontsize=8,
                bbox=dict(facecolor='black', alpha=0.5, boxstyle='round,pad=0.2')
            )

        # Dashed lines at interior window boundaries.
        for boundary in self.window_boundaries[1:-1]:
            ax.axvline(boundary, color='darkred', linewidth=1.5, linestyle='--')

        # Axis cosmetics: integer ticks on both axes.
        ax.set_xlim(0, self.window_boundaries[-1])
        ax.set_ylim(0, self.max_parallel)
        ax.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
        ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))

        # Axis labels.
        ax.set_xlabel("Time Step")         # x: time steps
        ax.set_ylabel("Parallel Slot")     # y: parallel slot index

        # Faint background showing slot utilisation.
        self._add_utilization_heatmap(ax)

        plt.tight_layout()
        plt.show()

    def _add_utilization_heatmap(self, ax):
        """Overlay a faint greyscale map of slot occupancy per time step."""
        # Occupancy grid: 1 where a slot is claimed at a time step.
        max_t = max([t for t in self.time_slot_map.keys()] or [0])
        utilization = np.zeros((max_t+1, self.max_parallel))

        for t in range(max_t+1):
            for (y1, y2) in self.time_slot_map.get(t, []):
                utilization[t, y1:y2] = 1

        # Draw the heatmap behind the bars.
        ax.imshow(
            utilization.T,
            aspect='auto',
            cmap='Greys',
            alpha=0.1,
            origin='lower',
            extent=(0, max_t, 0, self.max_parallel)
        )


# 定义组件数量、最大版本数、时间窗长度
# Scenario configuration for the demo setup.
num_components = 5                     # number of upgradable components
max_versions = 10                      # versions per component
time_windows = [10, 15, 12, 10, 12]    # length of each maintenance window
max_parallel = 3                       # concurrent upgrade slots
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")



def generate_feasible_dependency_matrix(
    num_components: int,
    max_versions: int,
    dep_prob: float = 0.3
    ) -> np.ndarray:
    """Randomly build a feasible dependency matrix D of shape (N, N, V).

    Entry semantics:
      * -1          : version v of component i does not depend on component j
      * 0 <= k < V  : version v of component i requires component j at version k
      * -2          : reserved for "version does not exist" (unused here)

    Guarantees:
      * dependencies only point at lower-indexed components (acyclic)
      * D[i, i, v] = 0 (the self entry is trivially satisfied)
      * every generated requirement k lies in [0, V), so it is reachable
    """
    deps = np.full((num_components, num_components, max_versions), -1, dtype=int)
    for comp in range(num_components):
        for ver in range(max_versions):
            # Self entry carries no real dependency.
            deps[comp, comp, ver] = 0
            # Only draw edges towards smaller indices to stay acyclic.
            for other in range(comp):
                if np.random.rand() >= dep_prob:
                    continue
                # Pick a random required version of the other component.
                required = np.random.randint(0, max_versions)
                deps[comp, other, ver] = required
                print(f"组件{comp}的版本{ver}依赖于组件{other}的版本{required}")
    return deps


# —— 在这里加一段 “向上填充” ——  
# 如果在 v=k 时出现了对 j 的依赖 r>=0，那么所有 v>k 的位置
# 都继承这个依赖（除非它们自己本来就有更严格的依赖）
def up_fill_dependency_matrix(num_components, max_versions, dependency_matrix):
    """Forward-fill dependencies along the version axis, in place.

    Once component i's version k declares a requirement r >= 0 on component
    j, every later version with no requirement of its own (-1) inherits the
    strictest requirement seen so far.  Explicit entries (>= 0) and -2
    markers are left untouched.  Returns the same, mutated array.
    """
    for i in range(num_components):
        for j in range(num_components):
            inherited = -1  # strictest requirement seen so far on this (i, j) pair
            for v in range(max_versions):
                entry = dependency_matrix[i, j, v]
                if entry >= 0:
                    # An explicit requirement raises the inherited floor.
                    inherited = max(inherited, entry)
                elif entry == -1 and inherited >= 0:
                    # No own requirement: propagate the inherited one forward.
                    dependency_matrix[i, j, v] = inherited

    return dependency_matrix


# 定义依赖矩阵（随机生成，实际应用应自定义）
# dependency_matrix = generate_feasible_dependency_matrix(num_components, max_versions, dep_prob=0.1)


# # 定义跳跃矩阵（随机生成，实际应用应自定义）
# p_one = 0.2   # 10% 是 1，90% 是 0
# jump_matrix = np.random.choice(
#     [0, 1],
#     size=(num_components, max_versions, max_versions),
#     p=[1-p_one, p_one]
# )
# # 创建环境
# env = ParallelUpgradeEnv(num_components, time_windows, dependency_matrix, jump_matrix, max_parallel, device)

# # 测试运行


# if __name__ == "__main__":
#     obs = env.reset()
#     done = False
#     while not done:
#         # action = env.action_space.sample()
#         action = env.get_current_valid_action_indices()
#         obs, reward, done, _ = env.step(random.choice(action), True)
#         env.render(done)

        