from cmath import log
from mimetypes import init
from tabnanny import check
from turtle import done
import numpy as np
import random ,math
from numpy.linalg import det
from device_ddpg import *
from BS_DDPG import *
from ris_ddpg import * 
import sys
from ris_ddpg import RIS


# Shorthand for π; used to scale normalized phase-shift actions to [0, 2π).
pi=math.pi

class NetWork_DDPG:
    """DDPG environment for a RIS-assisted mobile-edge-computing network.

    State layout (flattened, length ``dim_state``):
        [p (N), h_d (M*N), h_r (K*N), G_mk (M*K), d (N), c (N), τ (N), λ (N)]
    Action layout (flattened, length ``dim_action``):
        [f_edge (N), w_h (N*M), θ (K)]

    N devices offload (fraction λ) of their tasks to an M-antenna BS via a
    K-element RIS; the reward is the negative weighted delay/energy cost.
    """

    # The name lists only determine the problem sizes N, K, M below.
    Device = ['device1', 'device2', 'device3', 'device4', 'device5',
              'device6', 'device7', 'device8', 'device9', 'device10']
    RIS = ['ris1', 'ris2', 'ris3', 'ris4', 'ris5']
    BS = ['antenna1', 'antenna2', 'antenna3']
    N = len(Device)  # number of devices
    K = len(RIS)     # number of RIS elements
    M = len(BS)      # number of BS antennas

    # Global constants.
    penalty = 100            # reward penalty when a task misses its deadline τ
    B = 50 * 10**6           # channel bandwidth (Hz)
    n = 10**(-14)            # noise power σ^2
    ς = 1 * 10**(-25)        # effective capacitance coefficient (local CPU energy)
    e = 10**(-9)             # edge energy per cycle (1 W/GHz)
    bs_f_edge = 50 * 10**9   # total edge CPU frequency budget shared by devices (Hz)

    # Per-device parameters, randomized once at class-definition time.
    α = np.zeros(N)          # delay/energy trade-off weight of each device
    f_local = np.zeros(N)    # local CPU frequency of each device (Hz)
    for i in range(N):
        α[i] = random.uniform(0.4, 0.6)
        f_local[i] = random.uniform(1 * 10**8, 5 * 10**8)

    action_bound = [-1, 1]   # raw actor outputs live in [-1, 1]
    dim_state = 5 * N + M * K + K * N + M * N
    dim_action = N + K + N * M

    def __init__(self):
        self.name = 'env_ddpg'
        self.state1_dim = self.dim_state
        self.action1_dim = self.dim_action

    def GetInitState(self):
        """Build and return the initial flattened state vector.

        Channel gains (h_d, h_r, G_mk) and per-device task parameters
        (d, c, τ, p) come from the project ``Device``/``RIS``/``BS`` classes;
        the offloading ratios λ are a fixed pre-recorded vector.
        """
        init_d = np.zeros(self.N)                # task data sizes
        init_c = np.zeros(self.N)                # CPU cycles per bit
        init_τ = np.zeros(self.N)                # task deadlines
        init_p = np.zeros(self.N)                # device transmit powers
        init_h_d = np.zeros((self.M, self.N))    # direct device→BS channels
        init_h_r = np.zeros((self.K, self.N))    # device→RIS channels
        init_G_mk = np.zeros((self.M, self.K))   # RIS→BS channels
        for i in range(self.M):
            for j in range(self.N):
                init_h_d[i][j] = Device(j).h_d
        for i in range(self.K):
            for j in range(self.N):
                init_h_r[i][j] = RIS(j).h_r
        for i in range(self.M):
            for j in range(self.K):
                init_G_mk[i][j] = BS(j).G_mk
        for i in range(self.N):
            device_init = Device(i)
            init_d[i] = device_init.d
            init_c[i] = device_init.c
            init_τ[i] = device_init.τ
            init_p[i] = device_init.p
        # Fixed offloading coefficients, one per device (length must equal N).
        init_λ = [0.9995823502540588, 0.987679660320282, 0.9822847247123718,
                  0.9999232292175293, 1.0, 1.0, 1.0, 1.0,
                  0.9841600656509399, 1.0]
        # Flatten the channel matrices and concatenate everything into one
        # vector in the documented state order.
        return np.hstack((init_p,
                          init_h_d.flatten(),
                          init_h_r.flatten(),
                          init_G_mk.flatten(),
                          init_d, init_c, init_τ, np.array(init_λ)))

    def GetActionMatrix(self):
        """Build and return the initial flattened action vector.

        Components: edge CPU allocations f_edge (N), beamforming weights
        w_h (N*M), and RIS phase shifts θ (K), all taken from the project
        ``BS``/``RIS`` classes.
        """
        init_f_edge = np.zeros(self.N)
        init_θ = np.zeros(self.K)
        init_w_h = np.zeros((self.N, self.M))
        for i in range(self.N):
            init_f_edge[i] = BS(i).f_edge
        for j in range(self.K):
            init_θ[j] = RIS(j).θ
        for i in range(self.N):
            for j in range(self.M):
                init_w_h[i][j] = BS(j).w_h
        return np.hstack((init_f_edge, init_w_h.flatten(), init_θ))

    def getNextState_Reward(self, curr_state, curr_action):
        """Evaluate ``curr_action`` in ``curr_state``.

        Returns ``(next_state, Immidate_Rrd, done)``:
          next_state   -- the state, unchanged (channels/tasks are static here)
          Immidate_Rrd -- negative total cost over all devices; a device that
                          misses its deadline τ contributes ``-penalty``
          done         -- True only when the whole state vector is zero
        """
        N, M, K = self.N, self.M, self.K
        B, n, penalty = self.B, self.n, self.penalty
        α = self.α

        next_state = curr_state
        # Actor outputs are in [-1, 1]; shift them into [0, 1] first.
        next_action = (curr_action + 1) / 2

        # --- unpack the flat state vector --------------------------------
        p = next_state[:N]                                       # transmit powers
        h_d = np.array(next_state[N:N + N * M]).reshape((M, N))  # direct channels
        h_r = np.array(next_state[N + N * M:N + N * M + K * N]).reshape((K, N))
        off = N + N * M + K * N
        G_mk = np.matrix(np.array(next_state[off:off + M * K]).reshape((M, K)))
        off += M * K
        d = next_state[off:off + N]              # task sizes
        c = next_state[off + N:off + 2 * N]      # cycles per bit
        τ = next_state[off + 2 * N:off + 3 * N]  # deadlines
        λ = next_state[off + 3 * N:off + 4 * N]  # offloading ratios

        # --- rescale action components to physical units -----------------
        # Edge CPU: normalize the first N entries so they split bs_f_edge.
        # NOTE(review): total == 0 would divide by zero; presumably the actor
        # never emits all -1 actions — confirm upstream.
        total = 0.0
        for i in range(N):
            total += next_action[i]
        for i in range(N):
            next_action[i] = (next_action[i] / total) * self.bs_f_edge
        # RIS phases: map [0, 1] onto [0, 2π).
        for j in range(K):
            next_action[j + N * M + N] = next_action[j + N * M + N] * (2 * pi)

        # --- unpack the scaled action vector -----------------------------
        f_edge = next_action[:N]
        w_h = np.array(next_action[N:N + N * M]).reshape((N, M))
        θ = np.diag(next_action[N + N * M:])

        # --- received power per device -----------------------------------
        p_receive = []
        power_total = 0.0
        for i in range(N):
            h_d_i = np.matrix(h_d[:, i:i + 1])            # M x 1 direct channel
            h_r_i = np.matrix(h_r[:, i:i + 1])            # K x 1 RIS channel
            w_i = np.matrix(w_h[i:i + 1, :])              # 1 x M beamformer
            combined = w_i * (h_d_i + G_mk * θ * h_r_i)   # 1 x 1 effective gain
            p_receive.append(p[i] * abs(np.linalg.det(combined)) ** 2)
            power_total += p_receive[i]

        # --- achievable rate per device ----------------------------------
        R = np.zeros(N)
        for i in range(N):
            # SINR: every other device's signal is interference.
            γ = p_receive[i] / ((power_total - p_receive[i]) + n)
            # Real math.log replaces the original cmath.log: γ >= 0, so the
            # argument is >= 1 and the capacity is real and non-negative.
            R[i] = B * math.log(1 + γ, 2)

        # --- reward: negative weighted delay/energy cost -----------------
        Immidate_Rrd = 0
        for i in range(N):
            # Task size multiplier, re-drawn each step (2^21 .. 2^24 bits).
            random_sum = random.uniform(2 ** 21, 2 ** 24)
            latency = (λ[i] * d[i] * random_sum * (c[i] * 100 * R[i] + f_edge[i])
                       / (R[i] * f_edge[i]))
            if abs(latency) < τ[i]:
                # Weighted offloading cost for a device meeting its deadline.
                reward = (α[i] * (λ[i] * d[i] * random_sum
                          * (c[i] * 100 * R[i] + (1 + p[i] / α[i]) * f_edge[i]))
                          / (R[i] * f_edge[i]))
            else:
                reward = penalty  # deadline missed
            Immidate_Rrd = -reward + Immidate_Rrd
        print("Immidate_Rrd = ", Immidate_Rrd)
        # Episode terminates only if the entire state vector is zero
        # (effectively never with random channel gains).
        done = bool(np.all(next_state == 0))
        print("Done", done)
        return next_state, Immidate_Rrd, done

    def reset(self):
        """Return a fresh initial state."""
        return self.GetInitState()
    

if __name__ == '__main__':
    # Smoke test: build the environment and run a single interaction step.
    env = NetWork_DDPG()
    state = env.reset()
    for step in range(1):
        action = env.GetActionMatrix()
        state, reward, done = env.getNextState_Reward(state, action)