import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from torch.distributions import Categorical
from masking_categorical import CategoricalMasked
import config
from object import delay
import random
import math
import os

import pandas as pd
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from deepctr_torch.models import xDeepFM
from deepctr_torch.inputs import SparseFeat, DenseFeat, get_feature_names
import dcn as dcn
gpus = [0]
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')


# Trick 8: orthogonal initialization
def orthogonal_init(layer, gain=1.0):
    """Orthogonally initialize a layer's weight and zero its bias (Trick 8).

    Args:
        layer: module exposing a ``weight`` tensor (e.g. ``nn.Linear``).
        gain: scaling factor forwarded to ``nn.init.orthogonal_``.
    """
    nn.init.orthogonal_(layer.weight, gain=gain)
    # Layers constructed with bias=False have layer.bias == None; the
    # original unconditionally touched .bias and would raise on them.
    if layer.bias is not None:
        nn.init.constant_(layer.bias, 0)


import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, MinMaxScaler

import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, MinMaxScaler

import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, MinMaxScaler


class Actor(nn.Module):
    # Policy network: fuses three observation streams — download-finish
    # times, a DCN embedding of the layer-placement matrix, and the
    # 'alg_shu' state matrix — into action logits.

    def __init__(self, args):
        """Build the sub-networks.

        Uses args.action_dim, args.use_tanh and args.use_orthogonal_init.
        """
        super(Actor, self).__init__()
        
        self.fc_downloadtime = nn.Linear(25*731, 512)  # flattened 25x731 download-time matrix
        self.fc_alg_shu = nn.Linear(731, 512)  # row-wise projection of the alg_shu matrix
        self.fc2 = nn.Linear(512, 128)
        self.fc3 = nn.Linear(128, 128)
        self.fc4 = nn.Linear(128, args.action_dim)
        self.linear_layer = nn.Linear(32, 512)  # lifts 32-dim DCN rows to 512
        self.linear_layer_2 = nn.Linear(1352, 26)  # NOTE(review): assumes 52 * args.action_dim == 1352, i.e. action_dim == 26 — confirm
        
        self.activate_func = [nn.ReLU(), nn.Tanh()][args.use_tanh]
        self.deepdcn = None  # DCN is built lazily on the first forward pass
        self.embedding_size = 64

        # parameters of DCN
        # NOTE(review): naming is inverted relative to the common Criteo
        # convention — here 'C*' columns are treated as dense and 'I*' as sparse.
        self.dense_features = ['C' + str(i) for i in range(1, 13)]
        self.sparse_features = ['I' + str(i) for i in range(1, 720)]

        if args.use_orthogonal_init:
            print("------use_orthogonal_init------")
            orthogonal_init(self.fc_downloadtime)
            orthogonal_init(self.fc_alg_shu)
            orthogonal_init(self.fc2)
            orthogonal_init(self.fc3)
            orthogonal_init(self.fc4, gain=0.01)

    def forward(self, obs):
        """Map an observation dict to a (1, 26) tensor of action scores.

        Reads obs['download_finish_time'], obs['tmp_layer01'] and
        obs['alg_shu'].
        """
        downloadtime_input = torch.tensor(obs['download_finish_time']).view(1, -1).to(device)
        downloadtime_input = downloadtime_input.to(self.fc_downloadtime.weight.dtype)
        layer01_input = torch.tensor(obs['tmp_layer01'], dtype=torch.float).to(device)
        alg_shu_input = torch.tensor(obs['alg_shu']).to(device)
        # Reshape alg_shu to (EDGE_NODE_NUM+1, 731); np.resize repeats or
        # truncates the data as needed to fit that shape.
        s_ = np.resize(alg_shu_input.cpu().detach().numpy(), (config.EDGE_NODE_NUM+1, 731))
        # Convert the input tensor to the same dtype as the weight tensors
        s_ = torch.tensor(s_).to(device).float()  # cast the input tensor to float

        
        
        # NOTE(review): wrapping the name list in another list yields
        # MultiIndex columns; `columns=self.dense_features + self.sparse_features`
        # is presumably what was intended — confirm.
        self.s_df = pd.DataFrame(s_.cpu().numpy(), columns=[self.dense_features + self.sparse_features])
        
        feat_sizes = {}
        feat_sizes_dense = {feat: 1 for feat in self.dense_features}
        # NOTE(review): len(...) is the row count, not the number of distinct
        # categories (nunique) usually expected for sparse feature sizes —
        # verify against dcn.DCN's expectations.
        feat_sizes_sparse = {feat: len(self.s_df[feat]) for feat in self.sparse_features}
        feat_sizes.update(feat_sizes_dense)
        feat_sizes.update(feat_sizes_sparse)

        # NOTE(review): encoders/scalers are re-fit on every forward call, so
        # the encoding depends on the current batch rather than being stable.
        for feat in self.sparse_features:
            lbe = LabelEncoder()
            self.s_df[feat] = lbe.fit_transform(self.s_df[feat])

        mms = MinMaxScaler(feature_range=(0, 1))
        self.s_df[self.dense_features] = mms.fit_transform(self.s_df[self.dense_features])

        self.fixlen_feature_columns = [(feat, 'sparse') for feat in self.sparse_features] + [(feat, 'dense') for feat in self.dense_features]
        self.dnn_feature_columns = self.fixlen_feature_columns
        self.linear_feature_columns = self.fixlen_feature_columns

        # Lazily build the DCN on first use.
        # NOTE(review): this happens after the optimizer was created from
        # actor.parameters() in PPO_discrete.__init__, so the DCN's parameters
        # are likely never optimized — verify.
        if self.deepdcn is None:
            self.deepdcn = dcn.DCN(feat_sizes, self.embedding_size, self.linear_feature_columns, self.dnn_feature_columns).to(device)
        
        downloadtime_output = self.activate_func(self.fc_downloadtime(downloadtime_input))
        
        layer01_output = self.deepdcn(layer01_input)
        alg_shu_fc = self.activate_func(self.fc_alg_shu(s_).squeeze(1))

        # Hard-coded shapes: 1 + 25 + 26 = 52 rows after concatenation below.
        downloadtime_output = downloadtime_output.view(1, 512)  
        layer01_output = layer01_output.view(25, 32)  
        alg_shu_fc = alg_shu_fc.view(26, 512)  

        
        layer01_output = layer01_output.to(device)
        layer01_output = self.linear_layer(layer01_output)

        # Combine the three streams by row-wise concatenation (an earlier
        # version used element-wise addition instead):
        #combined_output = downloadtime_output + layer01_output + alg_shu_fc
        combined_output = torch.cat((downloadtime_output ,layer01_output ,alg_shu_fc),dim=0)

        combined_output = self.activate_func(self.fc2(combined_output))
        combined_output = self.activate_func(self.fc3(combined_output))
        a_prob = torch.softmax(self.fc4(combined_output), dim=1)
        # Flatten the 52 x action_dim softmax rows and project to 26 scores.
        # NOTE(review): after this linear layer the output is no longer a
        # probability distribution despite the variable name.
        a_prob = a_prob.view(1,-1)
        a_prob = self.linear_layer_2(a_prob)
        
        return a_prob





class Critic(nn.Module):
    """State-value network: flattens the observation and maps it to V(s)."""

    def __init__(self, args):
        super(Critic, self).__init__()
        # The flattened observation has a fixed width of 37281 features.
        self.fc1 = nn.Linear(37281, args.hidden_width)
        self.fc2 = nn.Linear(args.hidden_width, args.hidden_width)
        self.fc3 = nn.Linear(args.hidden_width, 1)
        # args.use_tanh selects the activation: 0 -> ReLU, 1 -> Tanh.
        self.activate_func = [nn.ReLU(), nn.Tanh()][args.use_tanh]

        if args.use_orthogonal_init:
            print("------use_orthogonal_init------")
            for layer in (self.fc1, self.fc2, self.fc3):
                orthogonal_init(layer)

    def forward(self, s):
        """Return the scalar value estimate, shape (batch, 1)."""
        h = torch.flatten(s, start_dim=1)
        h = self.activate_func(self.fc1(h))
        h = self.activate_func(self.fc2(h))
        return self.fc3(h)
   
   
   
class RunningMeanStd:
    """Incrementally track the mean and std of a stream of samples."""

    def __init__(self, shape):
        # shape: dimension of the tracked data
        self.n = 0
        self.mean = np.zeros(shape)
        self.S = np.zeros(shape)  # running sum of squared deviations
        self.std = np.sqrt(self.S)

    def update(self, x):
        """Fold one sample into the running statistics (Welford's method)."""
        x = np.array(x)
        self.n += 1
        if self.n == 1:
            # First sample. NOTE: the original sets std to x itself here
            # (not zero); kept byte-for-byte for behavioral parity.
            self.mean = x
            self.std = x
            return
        prev_mean = self.mean.copy()
        self.mean = prev_mean + (x - prev_mean) / self.n
        self.S = self.S + (x - prev_mean) * (x - self.mean)
        self.std = np.sqrt(self.S / self.n)


class RewardScaling:
    """Scale rewards by the running std of the discounted return.

    The mean is deliberately not subtracted — only the std divides the
    reward, so reward signs are preserved.
    """

    def __init__(self, shape, gamma):
        self.shape = shape  # reward shape = 1
        self.gamma = gamma  # discount factor
        self.running_ms = RunningMeanStd(shape=self.shape)
        self.R = np.zeros(self.shape)

    def __call__(self, x):
        # Accumulate the discounted return, update its running stats,
        # and normalize the incoming reward by the return's std.
        self.R = self.gamma * self.R + x
        self.running_ms.update(self.R)
        return x / (self.running_ms.std + 1e-8)

    def reset(self):
        """Clear the accumulated return at the end of an episode."""
        self.R = np.zeros(self.shape)


class PPO_discrete:
    """PPO agent with a masked discrete action space for edge-node scheduling.

    Holds an Actor (policy) and a Critic (value) network with separate Adam
    optimizers plus reward scaling. `update` is the main PPO-clip training
    step with GAE; `update1` is a legacy TD-based variant kept for reference.
    """

    def __init__(self, args):
        self.batch_size = args.batch_size
        self.mini_batch_size = args.mini_batch_size
        self.max_train_steps = args.max_train_steps
        self.lr = args.lr
        self.lr_a = args.lr_a  # Learning rate of actor
        self.lr_c = args.lr_c  # Learning rate of critic
        self.gamma = args.gamma  # Discount factor
        self.lamda = args.lamda  # GAE parameter
        self.epsilon = args.epsilon  # PPO clip parameter
        self.K_epochs = args.K_epochs  # PPO inner-update epochs
        self.entropy_coef = args.entropy_coef  # Entropy bonus coefficient
        self.set_adam_eps = args.set_adam_eps
        self.use_grad_clip = args.use_grad_clip
        self.use_lr_decay = args.use_lr_decay
        self.use_adv_norm = args.use_adv_norm

        self.actor = Actor(args).to(device)
        self.critic = Critic(args).to(device)
        self.rewardscaling = RewardScaling(1, self.gamma)

        # Trick: Adam eps=1e-5 is more stable than the 1e-8 default for PPO.
        if self.set_adam_eps:
            self.optimizer_actor = torch.optim.Adam(self.actor.parameters(), lr=self.lr_a, eps=1e-5)
            self.optimizer_critic = torch.optim.Adam(self.critic.parameters(), lr=self.lr_c, eps=1e-5)
        else:
            self.optimizer_actor = torch.optim.Adam(self.actor.parameters(), lr=self.lr_a)
            self.optimizer_critic = torch.optim.Adam(self.critic.parameters(), lr=self.lr_c)

    def choose_action(self, s, uid, obs):
        """Pick a feasible action for user `uid`.

        Returns (action, log_prob) as Python scalars. Falls back to the
        cloud node (config.EDGE_NODE_NUM) when no edge node is feasible.
        """
        available_actions = obs['next_can']
        # Indices whose mask value is 1, i.e. currently available actions.
        feasible_ids = [idx for idx, item in enumerate(available_actions) if item == 1]

        s = torch.unsqueeze(torch.tensor(s, dtype=torch.float), 0).to(device)
        with torch.no_grad():
            available_actions = torch.tensor(available_actions, dtype=torch.bool)
            probs = self.actor(obs)
            dist = CategoricalMasked(logits=probs, mask=available_actions)
            a = dist.argmax()  # greedy pick from the masked distribution

        if len(feasible_ids) != 0:
            action_retry_count = 0
            usr_locx = [j for i, j, k in obs['each_use_loc'] if i == uid][0]
            usr_locy = [k for i, j, k in obs['each_use_loc'] if i == uid][0]

            # Resample until the chosen edge satisfies the resource
            # constraints; bail out to the cloud after too many retries or
            # when the user location is unknown (inf).
            while self.satisfied_constrain(a.item(), uid, obs) == False:
                a = dist.sample()
                action_retry_count += 1
                if action_retry_count >= config.EDGE_NODE_NUM - 1 or usr_locx == np.inf or usr_locy == np.inf:
                    a = torch.tensor([config.EDGE_NODE_NUM])  # to cloud
                    break
        else:
            # No available action at all: route to the cloud.
            a = torch.tensor([config.EDGE_NODE_NUM])

        with torch.no_grad():
            a = a.to(device)
            a_logprob = dist.log_prob(a)

        return a.item(), a_logprob.item()

    def satisfied_constrain(self, edge_id, uidre, obs=None):
        """Return True iff placing user `uidre` on `edge_id` respects all limits.

        The cloud node (config.EDGE_NODE_NUM) is always feasible.
        """
        one_ulyr = [j for i, j in obs['each_usrhas_lyer'] if i == uidre][0]
        if edge_id == config.EDGE_NODE_NUM:
            return True  # cloud has no resource constraints
        # Container-count limit on the edge node.
        edge_con = [d for a, b, c, d, f in obs['each_edge_cpumemcondisk'] if a == edge_id][0]
        if config.node_max_container_number - edge_con - 1 < 0:
            print('edge {} container num is {} > 5.'.format(edge_id, edge_con))
            return False
        # Per-edge user-count limit.
        if config.node_usrnum_limit - obs['each_edge_usrnum'][edge_id] - 1 < 0:
            print('edge {} has user num {} > 50.'.format(edge_id, obs['each_edge_usrnum'][edge_id]))
            return False
        # Disk: free space must cover the task plus layers not yet on the edge.
        tmp_downsize = 0
        for item in one_ulyr:
            if item not in obs['each_edgehas_lyr'][edge_id]:
                tmp_downsize += obs['all_layer_downsiz'][item]

        edge_disk = [f for a, b, c, d, f in obs['each_edge_cpumemcondisk'] if a == edge_id][0]
        usr_has_tsksiz = [k for i, j, k in obs['each_usrhas_task'] if i == uidre][0]

        if edge_disk - usr_has_tsksiz - tmp_downsize < 0:
            print('edge {a} has disk {b} - user_tsk_size:{c} - layer_download_size:{d} < 0.'.format(a=edge_id,
                                                                                                    b=edge_disk,
                                                                                                    c=usr_has_tsksiz,
                                                                                                    d=tmp_downsize))
            return False
        # Fix: the original fell off the end here and returned None; make
        # the success case explicit.
        return True

    def update1(self, replay_buffer, total_steps, obs):
        """Legacy TD-target update (the main training path is `update`).

        'dw=True' means dead or win (no next state); 'done=True' marks the
        terminal of an episode.
        """
        s, a, a_logprob, r, s_, dw, done = replay_buffer.numpy_to_tensor()
        a = a.to(device)
        s_ = s_.to(device)
        r = r.to(device)
        s = s.to(device)
        td_target = r + self.gamma * self.critic(s_) * (1 - done).to(device)
        td_delta = td_target - self.critic(s)

        # GAE accumulated backwards over the batch.
        td_delta_tmp = td_delta.detach().cpu().numpy()
        advantage_list = []
        advantage = 0.0
        for delta in td_delta_tmp[::-1]:
            advantage = self.gamma * self.lamda * advantage + delta
            advantage_list.append(advantage)
        advantage_list.reverse()

        advantage = torch.tensor(np.array(advantage_list), dtype=torch.float).to(device)
        obs['alg_shu'] = s
        # NOTE(review): actor output can be negative after its final linear
        # layer, so torch.log here may produce NaNs — confirm upstream.
        old_log_probs = torch.log(self.actor(obs).gather(1, a)).detach().to(device)

        for _ in range(self.K_epochs):
            for index in BatchSampler(SubsetRandomSampler(range(self.batch_size)), self.mini_batch_size, False):
                obs['alg_shu'] = s[index]
                log_probs = torch.log(self.actor(obs).gather(1, a[index])).to(device)
                # Fix: the PPO importance ratio is exp(log_new - log_old);
                # the original used torch.sigmoid, which squashes the ratio
                # into (0, 1) and biases the clipped objective.
                ratio = torch.exp(log_probs - old_log_probs[index])
                surr1 = ratio * advantage[index]
                surr2 = torch.clamp(ratio, 1 - self.epsilon, 1 + self.epsilon) * advantage[index]  # clip
                actor_loss = torch.mean(-torch.min(surr1, surr2))
                critic_loss = torch.mean(F.mse_loss(self.critic(s[index]), td_target[index].detach())).to(device)
                self.optimizer_actor.zero_grad()
                self.optimizer_critic.zero_grad()
                actor_loss.backward()
                if self.use_grad_clip:  # Trick 7: Gradient clip
                    torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 0.5)
                critic_loss.backward()
                if self.use_grad_clip:
                    torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 0.5)
                self.optimizer_actor.step()
                self.optimizer_critic.step()

        if self.use_lr_decay:  # learning rate decay
            self.lr_decay(total_steps)

        return actor_loss, critic_loss

    def update(self, replay_buffer, total_steps, obs):
        """Main PPO-clip update with GAE advantages.

        'dw=True' means dead or win (no next state s'); 'done=True' marks
        episode end and resets the GAE accumulator.
        """
        s, a, a_logprob, r, s_, dw, done = replay_buffer.numpy_to_tensor()

        adv = []
        gae = 0
        with torch.no_grad():  # advantages and value targets need no grad
            a = a.to(device)
            s_ = s_.to(device)
            r = r.to(device)
            s = s.to(device)
            dw = dw.to(device)
            a_logprob = a_logprob.to(device)

            vs = self.critic(s)
            vs_ = self.critic(s_)
            deltas = r + self.gamma * (1.0 - dw) * vs_ - vs
            # Walk the batch backwards to accumulate GAE, resetting at
            # episode boundaries (done).
            for delta, d in zip(reversed(deltas.flatten().cpu().numpy()), reversed(done.flatten().cpu().numpy())):
                gae = delta + self.gamma * gae * (1.0 - d)
                adv.insert(0, gae)
            adv = torch.tensor(adv, dtype=torch.float).view(-1, 1).to(device)
            v_target = adv + vs
            if self.use_adv_norm:  # Trick 1: advantage normalization
                adv = ((adv - adv.mean()) / (adv.std() + 1e-10))

        for _ in range(self.K_epochs):
            for index in BatchSampler(SubsetRandomSampler(range(self.batch_size)), self.mini_batch_size, False):
                obs['alg_shu'] = s[index]
                # The actor head can emit negative values; clamp then softmax
                # to obtain a valid probability vector for the distribution.
                probs_clamped = torch.clamp(self.actor(obs), min=0)
                probs_normalized = F.softmax(probs_clamped, dim=1)

                dist_now = Categorical(probs=probs_normalized)
                dist_entropy = dist_now.entropy().view(-1, 1)

                a_logprob_now = dist_now.log_prob(a[index].squeeze()).view(-1, 1).to(device)
                # PPO importance ratio between new and behavior policies.
                ratios = torch.exp(a_logprob_now - a_logprob[index])
                surr1 = ratios * adv[index]
                surr2 = torch.clamp(ratios, 1 - self.epsilon, 1 + self.epsilon) * adv[index]
                actor_loss = -torch.min(surr1, surr2) - self.entropy_coef * dist_entropy

                actor_loss = actor_loss.mean()
                self.optimizer_actor.zero_grad()
                actor_loss.backward()
                # Fix: removed a stray debug `exit(1)` that terminated the
                # process right after the first actor backward pass.
                if self.use_grad_clip:
                    torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 0.5)
                self.optimizer_actor.step()

                v_s = self.critic(s[index])
                critic_loss = F.mse_loss(v_s, v_target[index])

                self.optimizer_critic.zero_grad()
                critic_loss.backward()
                if self.use_grad_clip:
                    torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 0.5)
                self.optimizer_critic.step()

        if self.use_lr_decay:
            self.lr_decay(total_steps)

        return actor_loss, critic_loss

    def lr_decay(self, total_steps):
        """Linearly decay the actor/critic learning rates over training."""
        lr_a_now = self.lr_a * (1 - total_steps / self.max_train_steps)
        lr_c_now = self.lr_c * (1 - total_steps / self.max_train_steps)
        for p in self.optimizer_actor.param_groups:
            p['lr'] = lr_a_now
        for p in self.optimizer_critic.param_groups:
            p['lr'] = lr_c_now
        # Fix: the original had a third loop over optimizer_actor that
        # re-assigned `self.lr`'s schedule, silently overriding lr_a_now
        # (apparent copy-paste for a third optimizer that does not exist).

    def lr_decay1(self, total_steps):
        """Exponential lr-decay variant (factor 0.95 every 45 steps)."""
        lr_a_now = self.lr_a * math.pow(0.95, total_steps / 45)
        lr_c_now = self.lr_c * math.pow(0.95, total_steps / 45)
        for p in self.optimizer_actor.param_groups:
            p['lr'] = lr_a_now
        for p in self.optimizer_critic.param_groups:
            p['lr'] = lr_c_now
        # Fix: same stray third loop as in lr_decay removed (it overwrote
        # the actor lr with the generic self.lr schedule).

    def save_model(self, total_epochs, path):
        """Save actor/critic weights, both optimizer states, and the epoch."""
        checkpoint = {
            "net_actor": self.actor.state_dict(),
            "net_ctritic": self.critic.state_dict(),  # key (typo included) kept for compatibility
            # Fix: the original used the key "optimizer" twice, so the
            # actor optimizer state was silently overwritten by the critic's.
            "optimizer_actor": self.optimizer_actor.state_dict(),
            "optimizer_critic": self.optimizer_critic.state_dict(),
            "epoch": total_epochs,
        }
        # torch.save creates/overwrites the file itself; no need to pre-touch it.
        torch.save(checkpoint, path)

    def load_model(self, path):
        """Load a checkpoint written by `save_model`; return the stored epoch."""
        checkpoint = torch.load(path)
        # Fix: the original read a nonexistent "net" key (always a KeyError)
        # and would have loaded the same weights into actor AND critic.
        self.actor.load_state_dict(checkpoint["net_actor"])
        self.critic.load_state_dict(checkpoint["net_ctritic"])
        self.optimizer_actor.load_state_dict(checkpoint["optimizer_actor"])
        self.optimizer_critic.load_state_dict(checkpoint["optimizer_critic"])
        return checkpoint["epoch"]
