from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from argparse import Action
from collections import deque

import os
import random
import numpy as np
import matplotlib.pyplot as plt
import time
import copy

import torch
from torch_geometric.loader import DataLoader, DataListLoader
from torch.autograd import Variable
import torch.nn as nn 
from progress.bar import Bar

from config import get_parse_args
from utils.logger import Logger

from rl.old_deepgate import get_recurrent_gnn as old_model
from rl.new_deepgate import get_recurrent_gnn as new_model


class Agent(object):
    """DQN agent for RL-driven test-point insertion on gate netlists.

    Holds an online Q network (``Q_net``), a periodically synchronized
    target network (``Q_netT``), an experience-replay deque, and the
    random/greedy action selection over candidate gates.  Each candidate
    action is a (gate index, test-point type) pair, where the network's
    three output columns score the 'AND' / 'OR' / 'OP' tp types.
    """

    def __init__(self, args, config) -> None:
        """Create the Q networks and move them to the best usable device.

        Args:
            args:   parsed command-line arguments (gpus, lr, weight_decay,
                    RL_model, RL_mode, target, no_tp_each_round, ...).
                    ``args.device`` is set here as a side effect.
            config: RL hyper-parameters (BATCH_SIZE, GAMMA, RANDOM_ACTION,
                    UPDATE_TARGET_NET, MEMORY_CAPACITY).

        Raises:
            Exception: if ``args.RL_model`` names an unknown model type,
            or model construction fails.
        """
        super().__init__()

        print('==> Using settings {}'.format(args))

        # Logger construction has side effects (log setup); keep the call.
        logger = Logger(args)
        self.args = args
        self.replayMemory = deque()   # experience replay buffer of (s, a, r, s', done)
        self.config = config
        self.train_times = 0          # optimization steps taken so far
        self.action_times = 0         # getAction() calls, drives random exploration
        self.average_loss = []        # per-step loss history for plotting

        # GPU
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus_str

        # Safer device selection: do not trust torch.cuda.is_available() alone.
        cuda_available = False
        try:
            cuda_available = torch.cuda.is_available() and args.gpus[0] >= 0
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C is not swallowed.
            cuda_available = False

        if cuda_available:
            try:
                args.device = torch.device('cuda')
                # Allocate a tiny tensor to verify CUDA actually works.
                test_tensor = torch.zeros(1).to(args.device)
            except Exception as e:
                print(f"[WARNING] CUDA声称可用但出错: {str(e)}")
                cuda_available = False
                args.device = torch.device('cpu')
        else:
            args.device = torch.device('cpu')

        print('Using device: ', args.device)

        # Model
        print('==> Creating model...')
        try:
            if self.args.RL_model == 'deepgate':
                model_fn = old_model
            elif self.args.RL_model in ('non_level', 'non_level_nonattn'):
                model_fn = new_model
            else:
                raise Exception("未知模型类型")

            self.Q_net = model_fn(args)
            self.Q_netT = model_fn(args)

            # Shared device-placement logic (was duplicated per model branch).
            if cuda_available:
                try:
                    self.Q_net = self.Q_net.to(args.device)
                    self.Q_netT = self.Q_netT.to(args.device)
                except Exception as e:
                    print(f"[WARNING] 将模型移动到GPU时出错: {str(e)}")
                    args.device = torch.device('cpu')
                    self.Q_net = self.Q_net.cpu()
                    self.Q_netT = self.Q_netT.cpu()
        except Exception as e:
            print(f"[ERROR] 创建模型失败: {str(e)}")
            import traceback
            traceback.print_exc()
            raise e

        print(self.Q_net)
        self.optimizer = torch.optim.Adam(self.Q_net.parameters(), args.lr, weight_decay=args.weight_decay)
        self.loss_func = nn.MSELoss()

    def _merge_graphs(self, graphs):
        """Concatenate per-sample graphs into one disjoint batch graph.

        Node indices of each appended graph's edge_index are shifted by the
        number of nodes already merged, so edges stay consistent.  Returns a
        new graph (the inputs are not mutated).
        """
        merged = copy.deepcopy(graphs[0])
        for g in graphs[1:]:
            merged.forward_level = torch.cat((merged.forward_level, g.forward_level), 0)
            merged.backward_level = torch.cat((merged.backward_level, g.backward_level), 0)
            merged.forward_index = torch.cat((merged.forward_index, g.forward_index), 0)
            merged.backward_index = torch.cat((merged.backward_index, g.backward_index), 0)
            # Vectorized offset (replaces an O(E) Python loop over a deepcopy);
            # `+` produces a fresh tensor, leaving g.edge_index untouched.
            shifted_edges = g.edge_index + len(merged.x)
            merged.edge_index = torch.cat((merged.edge_index, shifted_edges), 1)
            merged.x = torch.cat((merged.x, g.x), 0)
        merged.num_nodes = len(merged.x)
        return merged

    def train(self):
        """Run one DQN optimization step on a random replay minibatch.

        Returns:
            float: the MSE loss of this step.
        """
        config = self.config

        # Step 1: obtain random minibatch from replay memory
        minibatch = random.sample(self.replayMemory, config.BATCH_SIZE)
        state_batch_all = [data[0] for data in minibatch]
        action_batch_all = [data[1] for data in minibatch]
        reward_batch_all = [data[2] for data in minibatch]
        nextState_batch_all = [data[3] for data in minibatch]

        # Step 2: merge samples into disjoint batch graphs
        state_batch = self._merge_graphs(state_batch_all)
        nextState_batch = self._merge_graphs(nextState_batch_all)

        reward_batch = torch.tensor(reward_batch_all)
        # Normalize rewards in place: positives scaled into (0, 1] by the
        # largest reward, negatives into [-1, 0) by the most negative one.
        max_reward = torch.abs(torch.max(reward_batch))
        min_reward = torch.abs(torch.min(reward_batch))
        pos = reward_batch > 0
        neg = reward_batch < 0
        if pos.any():
            reward_batch[pos] /= max_reward
        if neg.any():
            reward_batch[neg] /= min_reward

        # Target Q from the (frozen) target network on the next states.
        QValue_batch = self.Q_netT(nextState_batch)[0]
        QValue_batch = QValue_batch.to('cpu').detach().numpy()
        # NOTE(review): `terminal` is read from the FIRST sample only and the
        # bootstrap uses a single global max over the whole merged batch,
        # not a per-sample max.  Preserved as-is — confirm this is intended.
        terminal = minibatch[0][4]
        if terminal:
            y_real = reward_batch
        else:
            y_real = reward_batch + config.GAMMA * np.max(QValue_batch)

        # Step 3: predicted Q of the action actually taken in each sample.
        # Output columns 0/1/2 correspond to the 'AND'/'OR'/'OP' tp types;
        # begin_idx offsets each sample's node index inside the merged batch.
        y_predict_all = self.Q_net(state_batch)[0].to('cpu')
        type_to_col = {'AND': 0, 'OR': 1, 'OP': 2}
        picked = []
        begin_idx = 0
        for batch_idx in range(config.BATCH_SIZE):
            node_idx, tp_type = action_batch_all[batch_idx][0], action_batch_all[batch_idx][1]
            # An unknown tp type now raises KeyError instead of silently
            # reusing the previous sample's Q value.
            picked.append(y_predict_all[node_idx + begin_idx][type_to_col[tp_type]])
            begin_idx += len(state_batch_all[batch_idx].x)
        y_predict = torch.stack(picked)

        # Step 4: Loss
        loss = self.loss_func(y_predict, y_real)

        # Step 5: optimize the online network
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # Step 6: periodically sync the target network with the online one
        if self.train_times % config.UPDATE_TARGET_NET == 0:
            self.Q_netT.load_state_dict(self.Q_net.state_dict())

        self.train_times += 1
        self.average_loss.append(loss.item())

        return loss.item()

    def _candidates(self, netlist, QValue=None):
        """Enumerate every legal (gate, tp-type) candidate for `netlist`.

        Each unmasked gate contributes three candidates ('AND', 'OR', 'OP').
        For the 'ATPG_PC' target only BUFF gates are legal locations.

        Returns:
            (cand_idx, cand_type, cand_score) — cand_score is populated from
            QValue's columns 0/1/2 when QValue is given, otherwise empty.
        """
        cand_idx, cand_type, cand_score = [], [], []
        for idx in range(netlist.init_size):
            if netlist.mask[idx] != 1:
                continue
            if self.args.target == 'ATPG_PC' and netlist.allGatesVec[idx].gate_type != self.args.gate_to_index['BUFF']:
                continue
            cand_idx.extend([idx, idx, idx])
            cand_type.extend(['AND', 'OR', 'OP'])
            if QValue is not None:
                cand_score.extend([QValue[idx][0], QValue[idx][1], QValue[idx][2]])
        return cand_idx, cand_type, cand_score

    def getAction(self, netlist, cp_idx, cp_tot):
        """Select ``args.no_tp_each_round`` test points for the netlist.

        Uses random exploration for the first RANDOM_ACTION calls in train
        mode; otherwise ranks candidates greedily by the Q network, falling
        back to a random choice if inference fails.

        Returns:
            (tp_pos, tp_type) lists, or ([-1], [-1]) when there are not
            enough candidates.
        """
        self.action_times += 1

        # Random Action
        if self.action_times < self.config.RANDOM_ACTION and self.args.RL_mode == 'train':
            print("[INFO] Random Action")
            cand_idx, cand_type, _ = self._candidates(netlist)
            if len(cand_idx) < self.args.no_tp_each_round:
                return [-1], [-1]
            sorted_id = list(range(len(cand_idx)))
            random.shuffle(sorted_id)

        # Get Action from model
        else:
            try:
                # Safely move the graph to the appropriate device.
                try:
                    if torch.cuda.is_available() and self.args.device.type == 'cuda':
                        graph = self.currentState.to(self.args.device)
                    else:
                        graph = self.currentState.cpu()
                        self.args.device = torch.device('cpu')
                except Exception as e:
                    print(f"[WARNING] 将图数据转移到设备时出错: {str(e)}")
                    graph = self.currentState.cpu()
                    self.args.device = torch.device('cpu')

                graph.cp_idx = cp_idx
                graph.cp_tot = cp_tot

                # Score every gate, then move the result safely to CPU.
                QValue = self.Q_net(graph)[0].detach()
                QValue = QValue.cpu().numpy()

                cand_idx, cand_type, cand_score = self._candidates(netlist, QValue)
                if len(cand_score) < self.args.no_tp_each_round:
                    return [-1], [-1]

                sorted_id = sorted(range(len(cand_score)), key=lambda k: cand_score[k], reverse=True)
            except Exception as e:
                print(f"[ERROR] 获取动作失败: {str(e)}")
                # Fall back to a random action on failure.  Rebuilding BOTH
                # lists here fixes a bug where only cand_idx was reset,
                # leaving cand_type misaligned with cand_idx.
                print("[INFO] 回退到随机动作")
                cand_idx, cand_type, _ = self._candidates(netlist)
                if len(cand_idx) < self.args.no_tp_each_round:
                    return [-1], [-1]
                sorted_id = list(range(len(cand_idx)))
                random.shuffle(sorted_id)

        tp_pos = []
        tp_type = []

        # Walk the ranking, skipping duplicate gate positions.  The bounds
        # guard prevents an IndexError when fewer than no_tp_each_round
        # UNIQUE gates exist (the length checks above count each gate 3x).
        k = 0
        while len(tp_pos) < self.args.no_tp_each_round and k < len(sorted_id):
            if cand_idx[sorted_id[k]] not in tp_pos:
                tp_pos.append(cand_idx[sorted_id[k]])
                tp_type.append(cand_type[sorted_id[k]])
            k += 1

        return tp_pos, tp_type

    def setMemory(self, action, reward, nextState, terminal):
        """Append a (s, a, r, s', done) transition and advance the state.

        Evicts the oldest transition once MEMORY_CAPACITY is exceeded.
        """
        self.replayMemory.append((self.currentState, action, reward, nextState, terminal))
        if len(self.replayMemory) > self.config.MEMORY_CAPACITY:
            self.replayMemory.popleft()
        self.currentState = nextState

    def setInitState(self, initState):
        """Set the initial environment state for the next episode."""
        self.currentState = initState

    def save_loss_figure(self, fig_path):
        """Plot the recorded per-step training losses and save to fig_path."""
        plt.figure()
        plt.plot(range(len(self.average_loss)), self.average_loss)
        plt.savefig(fig_path)
        plt.close()