import secrets
import torch
import torch.nn as nn
relu = nn.ReLU()

def random_list_generate(net_scale, connect_number):     # generate a list of distinct true-random integers
    """Return ``connect_number`` distinct random ints drawn from ``range(net_scale)``.

    Uses the OS entropy source via ``secrets.SystemRandom`` (consistent with the
    original ``secrets.randbelow`` loop), but samples without replacement in a
    single pass instead of rejection sampling with a list-membership test.

    Raises:
        ValueError: if ``connect_number > net_scale`` (the previous
            rejection loop would have spun forever in that case).
    """
    return secrets.SystemRandom().sample(range(net_scale), connect_number)
random_list_generate(12, 10)  # NOTE(review): import-time smoke test; result is discarded

class NeuronUnitGroup():
    """A group of neuron units with randomly wired inputs and temporal memory.

    Each of the ``group_scale`` units combines two weighted sums over random
    subsets of the previous frame (a "linear" part and an "exponent" part),
    feeds the result through a learnable piecewise-linear activation built
    from ``turning_point_num`` trainable turning points, and keeps a rolling
    memory of its last ``value_memory_num`` values.
    """
    def __init__(self,connect_num_linear=4,connect_num_exponent=2,turning_point_num=5,
                 value_memory_num=5,group_scale=10,last_frame_size=12,
                 device=torch.device("cuda" if torch.cuda.is_available() else "cpu")):
        # NOTE(review): the default ``device`` is evaluated once at import time;
        # pass an explicit device if CUDA availability can change afterwards.
        self.group_scale=group_scale
        self.value_memory_num=value_memory_num
        self.turning_point_num=turning_point_num
        self.device=device
        # Weighted connections. Each entry is
        #   [indices into the previous frame, weight tensor]
        # e.g. [[1,2], tensor([0.1,0.2])]
        self.connect_linear=[[random_list_generate(last_frame_size,connect_num_linear),torch.normal(1,0.1,(connect_num_linear,),requires_grad=True,device=device)] for i in range(group_scale)]
        self.connect_exponent=[[random_list_generate(last_frame_size,connect_num_exponent),torch.normal(1,0.1,(connect_num_exponent,),requires_grad=True,device=device)] for i in range(group_scale)]
        # Learnable activation turning points (size: turning_point_num x group_scale).
        # ``turning_pointr_list`` holds the raw trainable parameters
        # [point.x raw in [0,1), point.w]; ``turning_point_list`` holds the
        # derived pairs with x mapped to [-2, 2).
        # (attribute name keeps the original spelling — it is serialized by parameters())
        self.turning_pointr_list=[[torch.rand(group_scale,requires_grad=True,device=device),torch.normal(0,0.01,(group_scale,),requires_grad=True,device=device)] for i in range(turning_point_num)]
        self.turning_point_list=[]
        for i in range(turning_point_num):
            self.turning_point_list.append([self.turning_pointr_list[i][0]*4-2,self.turning_pointr_list[i][1]])
        # Temporal accumulation state.
        self.value_memory_init=torch.normal(1,0.1,(group_scale,value_memory_num),requires_grad=True,device=device)
        self.value_memory=self.value_memory_init
        self.w_steadily_refresh=torch.normal(1,0.1,(group_scale,value_memory_num),requires_grad=True,device=device)
        self.w_following_refresh_init=torch.normal(1,0.1,(group_scale,value_memory_num),requires_grad=True,device=device)
        self.w_following_refresh=self.w_following_refresh_init
        self.value=torch.sum(self.value_memory*(self.w_following_refresh+self.w_steadily_refresh)/(value_memory_num*2),dim=1)
    def flexible_activation(self,input):
        """Apply the learnable piecewise-linear activation element-wise.

        Sums ``w_i * relu(input + x_i)`` over all turning points, then
        normalizes by the number of points and shifts by +1.
        """
        y=torch.zeros(self.group_scale,device=self.device)
        for point_x,point_w in self.turning_point_list:
            y=y+point_w*relu(input+point_x)
        return y/self.turning_point_num+1
    def before_forget(self,forget_rate):
        """Pre-scale weights by 1/forget_rate outside the autograd graph.

        Paired with ``forget()``: the net value is unchanged, but only the
        multiplication in ``forget()`` is visible to autograd.
        NOTE(review): rebinding under ``no_grad`` yields tensors with
        ``requires_grad=False``; confirm this interacts as intended with
        ``param_4_optim()``, which may then hold stale references.
        """
        with torch.no_grad():
            for i in range(len(self.connect_linear)):
                self.connect_linear[i][1]=self.connect_linear[i][1]/forget_rate
            for i in range(len(self.connect_exponent)):
                self.connect_exponent[i][1]=self.connect_exponent[i][1]/forget_rate
    def forget(self,forget_rate):
        """Scale all connection weights by ``forget_rate`` (tracked by autograd)."""
        for i in range(len(self.connect_linear)):
            self.connect_linear[i][1]=self.connect_linear[i][1]*forget_rate
        for i in range(len(self.connect_exponent)):
            self.connect_exponent[i][1]=self.connect_exponent[i][1]*forget_rate
    def compute(self,last_frame):
        """Advance one time step from ``last_frame`` and refresh ``self.value``.

        ``last_frame`` is a 1-D tensor indexable by the connection index lists.
        """
        # Weighted sums: per unit, mean of (selected inputs * weights).
        l=torch.zeros(self.group_scale,device=self.device)
        for i in range(len(self.connect_linear)):
            l[i]=l[i]+torch.sum((last_frame[self.connect_linear[i][0]]*self.connect_linear[i][1]/len(self.connect_linear[i][0])),dim=0)
        e=torch.zeros(self.group_scale,device=self.device)
        for i in range(len(self.connect_exponent)):
            e[i]=e[i]+torch.sum((last_frame[self.connect_exponent[i][0]]*self.connect_exponent[i][1]/len(self.connect_exponent[i][0])),dim=0)
        l=l+e-2
        # Temporal accumulation: shift the new value into the memory window
        # and rotate the "following" refresh weights by one slot.
        self.value_memory=torch.cat((l.reshape(self.group_scale,1),self.value_memory),dim=1)[:,:-1]
        self.w_following_refresh=torch.cat((self.w_following_refresh[:,-1].reshape(self.group_scale,1),self.w_following_refresh),dim=1)[:,:-1]
        self.value=torch.sum(self.value_memory*(self.w_following_refresh+self.w_steadily_refresh)/(self.value_memory_num*2),dim=1)
        # Learnable activation.
        self.value=self.flexible_activation(self.value)
    def reload(self):
        """Detach the recurrent state from the autograd graph.

        FIX: the original body assigned each attribute to itself, which is a
        no-op; the ``no_grad`` context and the method name indicate the intent
        was to truncate backpropagation through time between segments.
        """
        with torch.no_grad():
            self.value_memory=self.value_memory.detach()
            self.w_following_refresh=self.w_following_refresh.detach()
            self.value=self.value.detach()
    def restart(self):
        """Reset the recurrent state back to its initial tensors.

        FIX: the original round-tripped through ``tolist()``/``torch.tensor``,
        which silently moved the state to CPU (and dropped dtype info); on a
        CUDA device later computations would then fail. ``detach().clone()``
        keeps device and dtype while still producing fresh, graph-free copies.
        """
        with torch.no_grad():
            self.value_memory=self.value_memory_init.detach().clone()
            self.w_following_refresh=self.w_following_refresh_init.detach().clone()
            self.value=torch.sum(self.value_memory*(self.w_following_refresh+self.w_steadily_refresh)/(self.value_memory_num*2),dim=1)
    def pruning(self,level_linear=0.001,level_exponent=0.001):
        """Drop connections whose weight is not above the given threshold.

        Rebuilds ``connect_linear``/``connect_exponent`` keeping only entries
        with weight > level.  NOTE(review): the comparison is signed, so
        strongly negative weights are pruned too — confirm that is intended.
        """
        def _prune(connections,level):
            # One [kept indices, kept weights] pair per unit.
            pruned=[]
            for nu in connections:
                kept_idx=[]
                kept_w=torch.tensor([],device=self.device)
                for i in range(len(nu[0])):
                    if nu[1][i]>level:
                        kept_idx.append(nu[0][i])
                        kept_w=torch.cat((kept_w,nu[1][i].unsqueeze(0)))
                pruned.append([kept_idx,kept_w])
            return pruned
        self.connect_linear=_prune(self.connect_linear,level_linear)
        self.connect_exponent=_prune(self.connect_exponent,level_exponent)
    def supconnect(self,last_frame_size,num_linear=1,num_exponent=1):
        """Grow each unit by appending new random connections with fresh weights."""
        for nu in self.connect_linear:
            nu[0]+=random_list_generate(last_frame_size,num_linear)
            nu[1]=torch.cat((nu[1],torch.normal(1,0.1,(num_linear,),requires_grad=True,device=self.device)))
        for nu in self.connect_exponent:
            nu[0]+=random_list_generate(last_frame_size,num_exponent)
            nu[1]=torch.cat((nu[1],torch.normal(1,0.1,(num_exponent,),requires_grad=True,device=self.device)))
    def unforget(self,forget_rate):
        """Inverse of ``before_forget``: re-multiply weights outside autograd."""
        with torch.no_grad():
            for i in range(len(self.connect_linear)):
                self.connect_linear[i][1]=self.connect_linear[i][1]*forget_rate
            for i in range(len(self.connect_exponent)):
                self.connect_exponent[i][1]=self.connect_exponent[i][1]*forget_rate
    def parameters(self):
        """Serialize hyperparameters and trainable tensors into a dict."""
        parameters={}
        parameters['group_scale']=self.group_scale
        parameters['value_memory_num']=self.value_memory_num
        parameters['turning_point_num']=self.turning_point_num
        parameters['connect_linear']=self.connect_linear
        parameters['connect_exponent']=self.connect_exponent
        parameters['turning_pointr_list']=self.turning_pointr_list
        parameters['value_memory_init']=self.value_memory_init
        parameters['w_steadily_refresh']=self.w_steadily_refresh
        parameters['w_following_refresh_init']=self.w_following_refresh_init
        return parameters
    def load_parameters(self,parameters):
        """Restore state from a dict produced by ``parameters()`` and rebuild derived tensors."""
        self.group_scale=parameters['group_scale']
        self.value_memory_num=parameters['value_memory_num']
        self.turning_point_num=parameters['turning_point_num']
        self.connect_linear=parameters['connect_linear']
        self.connect_exponent=parameters['connect_exponent']
        self.turning_pointr_list=parameters['turning_pointr_list']
        # Re-derive turning points from the raw parameters.
        self.turning_point_list=[[i[0]*4-2,i[1]] for i in self.turning_pointr_list]
        self.value_memory_init=parameters['value_memory_init']
        self.value_memory=self.value_memory_init
        self.w_steadily_refresh=parameters['w_steadily_refresh']
        self.w_following_refresh_init=parameters['w_following_refresh_init']
        self.w_following_refresh=self.w_following_refresh_init
        self.value=torch.sum(self.value_memory*(self.w_following_refresh+self.w_steadily_refresh)/(self.value_memory_num*2),dim=1)
    def param_4_optim(self):
        """Return the flat list of trainable tensors to hand to an optimizer."""
        param_4_optim=[]
        for connect in self.connect_linear:
            param_4_optim.append(connect[1])
        for connect in self.connect_exponent:
            param_4_optim.append(connect[1])
        for point in self.turning_pointr_list:
            param_4_optim+=[point[0],point[1]]
        param_4_optim.append(self.value_memory_init)
        param_4_optim.append(self.w_steadily_refresh)
        param_4_optim.append(self.w_following_refresh_init)
        return param_4_optim
    def frame(self):
        """Snapshot the recurrent state (memory, rotating weights, current value)."""
        frame={}
        frame['value_memory']=self.value_memory
        frame['w_following_refresh']=self.w_following_refresh
        frame['value']=self.value
        return frame
    def load_frame(self,frame):
        """Restore the recurrent state from a ``frame()`` snapshot."""
        self.value_memory=frame['value_memory']
        self.w_following_refresh=frame['w_following_refresh']
        self.value=frame['value']