from collections import namedtuple
from inspect import getargspec, getfullargspec
import itertools

import numpy as np
import torch
from torch import optim
import torch.nn as nn

from utils import *
from action_utils import *
from TNN.utils import SNN, ten2mat

# Field names shared by both transition record types.
_BASE_FIELDS = (
    'state', 'action', 'action_out', 'value', 'episode_mask',
    'episode_mini_mask', 'next_state', 'reward', 'misc',
)

# One environment step as stored in a training batch.
Transition = namedtuple('Transition', _BASE_FIELDS)
# LRR variant: additionally keeps the attention maps produced at that step.
TransitionLRR = namedtuple('TransitionLRR', _BASE_FIELDS + ('attention',))

class Trainer(object):
    """On-policy actor-critic trainer.

    Rolls out ``policy_net`` in ``env`` to collect batches of transitions,
    then updates the network with a policy-gradient loss built from mixed
    cooperative / non-cooperative discounted returns, a value baseline and
    an optional entropy bonus.

    Args:
        args: hyper-parameter namespace (nagents, hid_size, gamma, lrate,
            max_steps, batch_size, ...).
        policy_net: recurrent policy; ``policy_net([state, hidden], info)``
            returns ``(action_out, value, next_hidden)``.
        env: environment wrapper exposing ``reset()`` / ``step()`` /
            ``display()``.
    """

    def __init__(self, args, policy_net, env):
        self.args = args
        self.policy_net = policy_net
        self.env = env
        self.display = args.display
        # True while collecting the final episode(s) of a batch; gates rendering.
        self.last_step = False
        self.optimizer = optim.RMSprop(policy_net.parameters(),
            lr = args.lrate, alpha=0.97, eps=1e-6)
        self.params = list(self.policy_net.parameters())


    def get_episode(self, epoch):
        """Roll out one episode.

        Args:
            epoch: current epoch; forwarded to ``env.reset`` when it accepts
                an ``epoch`` argument.

        Returns:
            ``(episode, stat)``: a list of ``Transition`` tuples and a dict
            of episode statistics.
        """
        episode = []
        # getfullargspec replaces inspect.getargspec (removed in Python 3.11).
        reset_args = getfullargspec(self.env.reset).args
        if 'epoch' in reset_args:
            state = self.env.reset(epoch)
        else:
            state = self.env.reset()
        should_display = self.display and self.last_step

        if should_display:
            self.env.display()
        stat = dict()
        info = dict()
        switch_t = -1

        # Placeholder; replaced by the policy's own init_hidden at t == 0.
        prev_hid = torch.zeros(1, self.args.nagents, self.args.hid_size)

        for t in range(self.args.max_steps):
            misc = dict()

            if t == 0:
                prev_hid = self.policy_net.init_hidden(batch_size=state.shape[0])

            x = [state, prev_hid]
            action_out, value, prev_hid = self.policy_net(x, info)

            if (t + 1) % self.args.detach_gap == 0:
                # Truncate BPTT every detach_gap steps (assumes an
                # LSTM-style (h, c) hidden tuple).
                prev_hid = (prev_hid[0].detach(), prev_hid[1].detach())

            action = select_action(self.args, action_out)
            action, actual = translate_action(self.args, self.env, action)
            next_state, reward, done, info = self.env.step(actual)

            # Mask out dead agents when the env reports an alive mask.
            if 'alive_mask' in info:
                misc['alive_mask'] = info['alive_mask'].reshape(reward.shape)
            else:
                misc['alive_mask'] = np.ones_like(reward)

            stat['reward'] = stat.get('reward', 0) + reward[:self.args.nfriendly]
            if hasattr(self.args, 'enemy_comm') and self.args.enemy_comm:
                stat['enemy_reward'] = stat.get('enemy_reward', 0) + reward[self.args.nfriendly:]

            done = done or t == self.args.max_steps - 1

            episode_mask = np.ones(reward.shape)
            episode_mini_mask = np.ones(reward.shape)

            if done:
                # Zero mask stops return bootstrapping across episode ends.
                episode_mask = np.zeros(reward.shape)
            else:
                if 'is_completed' in info:
                    episode_mini_mask = 1 - info['is_completed'].reshape(-1)

            if should_display:
                self.env.display()

            trans = Transition(state, action, action_out, value, episode_mask, episode_mini_mask, next_state, reward, misc)
            episode.append(trans)
            state = next_state
            if done:
                break
        stat['num_steps'] = t + 1
        stat['steps_taken'] = stat['num_steps']

        if hasattr(self.env, 'reward_terminal'):
            reward = self.env.reward_terminal()
            # Fold the terminal reward into the last stored transition.
            episode[-1] = episode[-1]._replace(reward = episode[-1].reward + reward)
            stat['reward'] = stat.get('reward', 0) + reward[:self.args.nfriendly]
            if hasattr(self.args, 'enemy_comm') and self.args.enemy_comm:
                stat['enemy_reward'] = stat.get('enemy_reward', 0) + reward[self.args.nfriendly:]


        if hasattr(self.env, 'get_stat'):
            merge_stat(self.env.get_stat(), stat)
        return (episode, stat)


    def compute_grad(self, batch):
        """Compute the actor-critic loss for ``batch`` and backpropagate it.

        Gradients are accumulated into the policy parameters; no optimizer
        step is taken here.

        Returns:
            dict with 'action_loss', 'value_loss' and 'entropy' scalars.
        """
        stat = dict()
        # num_actions: number of discrete actions in the action space
        num_actions = self.args.num_actions
        # dim_actions: number of action heads
        dim_actions = self.args.dim_actions

        n = self.args.nagents
        batch_size = len(batch.state)

        # Rewards/masks/actions arrive as numpy arrays (no grad); only
        # batch.value and batch.action_out are tensors carrying gradients.
        # np.array() first avoids the slow tensor-from-list-of-arrays path.
        # rewards: [batch_size * n]
        rewards = torch.Tensor(np.array(batch.reward))
        # episode_mask: [batch_size * n]
        episode_masks = torch.Tensor(np.array(batch.episode_mask))
        # episode_mini_mask: [batch_size * n]
        episode_mini_masks = torch.Tensor(np.array(batch.episode_mini_mask))
        actions = torch.Tensor(np.array(batch.action))
        # actions: [batch_size * n * dim_actions] have been detached
        actions = actions.transpose(1, 2).view(-1, n, dim_actions)

        values = torch.cat(batch.value, dim=0)
        action_out = list(zip(*batch.action_out))
        action_out = [torch.cat(a, dim=0) for a in action_out]

        # alive_masks: [batch_size * n]
        alive_masks = torch.Tensor(np.concatenate([item['alive_mask'] for item in batch.misc])).view(-1)

        coop_returns = torch.Tensor(batch_size, n)
        ncoop_returns = torch.Tensor(batch_size, n)
        returns = torch.Tensor(batch_size, n)
        advantages = torch.Tensor(batch_size, n)
        values = values.view(batch_size, n)

        prev_coop_return = 0
        prev_ncoop_return = 0

        # Discounted returns, computed backwards; episode masks stop
        # bootstrapping across episode boundaries.
        for i in reversed(range(rewards.size(0))):
            coop_returns[i] = rewards[i] + self.args.gamma * prev_coop_return * episode_masks[i]
            ncoop_returns[i] = rewards[i] + self.args.gamma * prev_ncoop_return * episode_masks[i] * episode_mini_masks[i]

            prev_coop_return = coop_returns[i].clone()
            prev_ncoop_return = ncoop_returns[i].clone()

            # Blend shared (mean) and individual returns via mean_ratio.
            returns[i] = (self.args.mean_ratio * coop_returns[i].mean()) \
                        + ((1 - self.args.mean_ratio) * ncoop_returns[i])


        for i in reversed(range(rewards.size(0))):
            advantages[i] = returns[i] - values.data[i]

        if self.args.normalize_rewards:
            advantages = (advantages - advantages.mean()) / advantages.std()

        # element of log_p_a: [(batch_size*n) * num_actions[i]]
        log_p_a = [action_out[i].view(-1, num_actions[i]) for i in range(dim_actions)]
        # actions: [(batch_size*n) * dim_actions]
        actions = actions.contiguous().view(-1, dim_actions)

        if self.args.advantages_per_action:
            # log_prob: [(batch_size*n) * dim_actions]
            log_prob = multinomials_log_densities(actions, log_p_a)
            # the log prob of each action head is multiplied by the advantage
            action_loss = -advantages.view(-1).unsqueeze(-1) * log_prob
            action_loss *= alive_masks.unsqueeze(-1)
        else:
            # log_prob: [(batch_size*n) * 1]
            log_prob = multinomials_log_density(actions, log_p_a)
            action_loss = -advantages.view(-1) * log_prob.squeeze()
            action_loss *= alive_masks

        action_loss = action_loss.sum()
        stat['action_loss'] = action_loss.item()

        # value loss term
        targets = returns
        value_loss = (values - targets).pow(2).view(-1)
        value_loss *= alive_masks
        value_loss = value_loss.sum()

        stat['value_loss'] = value_loss.item()
        loss = action_loss + self.args.value_coeff * value_loss

        # entropy regularization term: -sum p * log p over every head
        entropy = 0
        for i in range(len(log_p_a)):
            entropy -= (log_p_a[i] * log_p_a[i].exp()).sum()
        stat['entropy'] = entropy.item()
        if self.args.entr > 0:
            loss -= self.args.entr * entropy

        loss.backward()

        return stat

    def run_batch(self, epoch):
        """Collect at least ``args.batch_size`` transitions.

        Returns:
            ``(batch, stats)`` where ``batch`` is a ``Transition`` of tuples
            (one entry per field, zipped across all collected steps).
        """
        batch = []
        self.stats = dict()
        self.stats['num_episodes'] = 0
        while len(batch) < self.args.batch_size:
            if self.args.batch_size - len(batch) <= self.args.max_steps:
                # The next episode may be the last of this batch.
                self.last_step = True
            episode, episode_stat = self.get_episode(epoch)
            merge_stat(episode_stat, self.stats)
            self.stats['num_episodes'] += 1
            batch += episode

        self.last_step = False
        self.stats['num_steps'] = len(batch)
        batch = Transition(*zip(*batch))
        return batch, self.stats

    def train_batch(self, epoch):
        """Run one batch, backpropagate the loss, and apply an optimizer step.

        Returns the merged statistics dict for the batch.
        """
        batch, stat = self.run_batch(epoch)
        self.optimizer.zero_grad()

        s = self.compute_grad(batch)
        merge_stat(s, stat)
        # Average accumulated gradients over the number of environment steps.
        # (Uses the public .grad accessor rather than the private ._grad.)
        for p in self.params:
            if p.grad is not None:
                p.grad.data /= stat['num_steps']
        self.optimizer.step()

        return stat

    def state_dict(self):
        """Return the optimizer state (for checkpointing)."""
        return self.optimizer.state_dict()

    def load_state_dict(self, state):
        """Restore the optimizer state from a checkpoint."""
        self.optimizer.load_state_dict(state)

class TrainerLRR(Trainer):
    """Trainer variant with attention rank regularization (LRR).

    The policy additionally returns attention maps and per-layer nuclear-norm
    summaries; this trainer logs those summaries, stores the attention in
    each transition, and subtracts tensor-nuclear-norm (TNN) "high rank"
    regularization terms from the loss before backpropagation.
    """

    def __init__(self, args, policy_net, env):
        super(TrainerLRR, self).__init__(args, policy_net, env)

    def get_episode(self, epoch):
        """Roll out one episode, recording attention.

        Same as the base class, except the policy also returns
        ``attention`` (stored in each ``TransitionLRR``) and
        ``attention_nuclear`` (per-layer rank statistics logged into
        ``stat``).

        Returns:
            ``(episode, stat)``: list of ``TransitionLRR`` and a stats dict.
        """
        episode = []
        # getfullargspec replaces inspect.getargspec (removed in Python 3.11).
        reset_args = getfullargspec(self.env.reset).args
        if 'epoch' in reset_args:
            state = self.env.reset(epoch)
        else:
            state = self.env.reset()
        # NOTE: unlike the base class, rendering is not gated on last_step.
        should_display = self.display

        if should_display:
            self.env.display()
        stat = dict()
        info = dict()
        switch_t = -1

        # Placeholder; replaced by the policy's own init_hidden at t == 0.
        prev_hid = torch.zeros(1, self.args.nagents, self.args.hid_size)

        for t in range(self.args.max_steps):
            misc = dict()

            if t == 0:
                prev_hid = self.policy_net.init_hidden(batch_size=state.shape[0])

            x = [state, prev_hid]
            # LRR policy also yields attention maps and nuclear-norm stats.
            action_out, value, prev_hid, attention, attention_nuclear = self.policy_net(x, info)

            if (t + 1) % self.args.detach_gap == 0:
                # Truncate BPTT (assumes an LSTM-style (h, c) hidden tuple).
                prev_hid = (prev_hid[0].detach(), prev_hid[1].detach())

            action = select_action(self.args, action_out)
            action, actual = translate_action(self.args, self.env, action)
            next_state, reward, done, info = self.env.step(actual)

            if 'alive_mask' in info:
                misc['alive_mask'] = info['alive_mask'].reshape(reward.shape)
            else:
                misc['alive_mask'] = np.ones_like(reward)

            stat['reward'] = stat.get('reward', 0) + reward[:self.args.nfriendly]
            if hasattr(self.args, 'enemy_comm') and self.args.enemy_comm:
                stat['enemy_reward'] = stat.get('enemy_reward', 0) + reward[self.args.nfriendly:]

            done = done or t == self.args.max_steps - 1

            episode_mask = np.ones(reward.shape)
            episode_mini_mask = np.ones(reward.shape)

            if done:
                episode_mask = np.zeros(reward.shape)
            else:
                if 'is_completed' in info:
                    episode_mini_mask = 1 - info['is_completed'].reshape(-1)

            if should_display:
                self.env.display()

            trans = TransitionLRR(state, action, action_out, value, episode_mask, episode_mini_mask, next_state, reward, misc, attention)
            episode.append(trans)
            state = next_state
            if done:
                break
            # NOTE(review): this logging runs after the `break`, so the
            # terminal step's attention stats are never recorded -- confirm
            # that is intended.
            for layer_index in range(len(attention_nuclear)):
                # Attention: names for tensorboard are index from 1 instead of 0
                key_every_head_rank = 'attention%i_every_head_rank' % (layer_index+1)
                key_all_head_rank = 'attention%i_all_head_rank' % (layer_index+1)
                key_sorted_every_head_rank = 'attention%i_sorted_every_head_rank' % (layer_index+1)
                stat[key_every_head_rank] = attention_nuclear[layer_index][0]
                stat[key_all_head_rank] = attention_nuclear[layer_index][1]
                stat[key_sorted_every_head_rank] = attention_nuclear[layer_index][2]
                # if we want to record the nuclear norm of 1-subscheduler
                if len(attention_nuclear[layer_index]) ==6  and layer_index==0:
                    key_every_head_rank = 'sub_scheduler_attention%i_every_head_rank' % (layer_index+1)
                    key_all_head_rank = 'sub_scheduler_attention%i_all_head_rank' % (layer_index+1)
                    key_sorted_every_head_rank = 'sub_scheduler_attention%i_sorted_every_head_rank' % (layer_index+1)
                    stat[key_every_head_rank] = attention_nuclear[layer_index][3]
                    stat[key_all_head_rank] = attention_nuclear[layer_index][4]
                    stat[key_sorted_every_head_rank] = attention_nuclear[layer_index][5]
                if len(attention_nuclear[layer_index]) == 4  and layer_index==0:
                    key_every_head_rank = 'sub_scheduler_attention%i_every_head_rank' % (layer_index+1)
                    stat[key_every_head_rank] = attention_nuclear[layer_index][3]

        stat['num_steps'] = t + 1
        stat['steps_taken'] = stat['num_steps']

        if hasattr(self.env, 'reward_terminal'):
            reward = self.env.reward_terminal()
            # Fold the terminal reward into the last stored transition.
            episode[-1] = episode[-1]._replace(reward = episode[-1].reward + reward)
            stat['reward'] = stat.get('reward', 0) + reward[:self.args.nfriendly]
            if hasattr(self.args, 'enemy_comm') and self.args.enemy_comm:
                stat['enemy_reward'] = stat.get('enemy_reward', 0) + reward[self.args.nfriendly:]


        if hasattr(self.env, 'get_stat'):
            merge_stat(self.env.get_stat(), stat)
        return (episode, stat)

    def run_batch(self, epoch):
        """Collect at least ``args.batch_size`` transitions (LRR variant).

        Returns:
            ``(batch, stats)`` where ``batch`` is a ``TransitionLRR`` of
            zipped per-field tuples.
        """
        batch = []
        self.stats = dict()
        self.stats['num_episodes'] = 0
        while len(batch) < self.args.batch_size:
            if self.args.batch_size - len(batch) <= self.args.max_steps:
                self.last_step = True
            episode, episode_stat = self.get_episode(epoch)
            merge_stat(episode_stat, self.stats)
            self.stats['num_episodes'] += 1
            batch += episode

        self.last_step = False
        self.stats['num_steps'] = len(batch)
        batch = TransitionLRR(*zip(*batch))
        return batch, self.stats

    def compute_grad(self, batch):
        """Actor-critic loss plus attention rank regularization.

        Same loss as the base class, minus two TNN "high rank" terms whose
        weights are rescaled each call so their contribution is a fixed
        fraction (high_rank_coeff1/2) of the current loss magnitude.

        Raises:
            NotImplementedError: if ``args.method`` is not one of the
                supported LRR methods.

        Returns:
            dict with loss statistics, including 'rank_to_high1/2'.
        """
        stat = dict()
        # num_actions: number of discrete actions in the action space
        num_actions = self.args.num_actions
        # dim_actions: number of action heads
        dim_actions = self.args.dim_actions

        n = self.args.nagents
        batch_size = len(batch.state)

        # Rewards/masks/actions arrive as numpy arrays (no grad); only
        # batch.value, batch.action_out and batch.attention are tensors
        # that carry gradients.
        # rewards: [batch_size * n]
        rewards = torch.Tensor(np.array(batch.reward))
        # episode_mask: [batch_size * n]
        episode_masks = torch.Tensor(np.array(batch.episode_mask))
        # episode_mini_mask: [batch_size * n]
        episode_mini_masks = torch.Tensor(np.array(batch.episode_mini_mask))
        actions = torch.Tensor(np.array(batch.action))
        # actions: [batch_size * n * dim_actions] have been detached
        actions = actions.transpose(1, 2).view(-1, n, dim_actions)

        values = torch.cat(batch.value, dim=0)
        action_out_mid = list(zip(*batch.action_out))
        action_out = [torch.cat(a, dim=0) for a in action_out_mid]

        # alive_masks: [batch_size * n]
        alive_masks = torch.Tensor(np.concatenate([item['alive_mask'] for item in batch.misc])).view(-1)

        coop_returns = torch.Tensor(batch_size, n)
        ncoop_returns = torch.Tensor(batch_size, n)
        returns = torch.Tensor(batch_size, n)
        advantages = torch.Tensor(batch_size, n)
        values = values.view(batch_size, n)

        prev_coop_return = 0
        prev_ncoop_return = 0

        # Discounted returns, computed backwards; episode masks stop
        # bootstrapping across episode boundaries.
        for i in reversed(range(rewards.size(0))):
            coop_returns[i] = rewards[i] + self.args.gamma * prev_coop_return * episode_masks[i]
            ncoop_returns[i] = rewards[i] + self.args.gamma * prev_ncoop_return * episode_masks[i] * episode_mini_masks[
                i]

            prev_coop_return = coop_returns[i].clone()
            prev_ncoop_return = ncoop_returns[i].clone()

            # Blend shared (mean) and individual returns via mean_ratio.
            returns[i] = (self.args.mean_ratio * coop_returns[i].mean()) \
                         + ((1 - self.args.mean_ratio) * ncoop_returns[i])

        for i in reversed(range(rewards.size(0))):
            advantages[i] = returns[i] - values.data[i]

        if self.args.normalize_rewards:
            advantages = (advantages - advantages.mean()) / advantages.std()

        # element of log_p_a: [(batch_size*n) * num_actions[i]]
        log_p_a = [action_out[i].view(-1, num_actions[i]) for i in range(dim_actions)]
        # actions: [(batch_size*n) * dim_actions]
        actions = actions.contiguous().view(-1, dim_actions)

        if self.args.advantages_per_action:
            # log_prob: [(batch_size*n) * dim_actions]
            log_prob = multinomials_log_densities(actions, log_p_a)
            # the log prob of each action head is multiplied by the advantage
            action_loss = -advantages.view(-1).unsqueeze(-1) * log_prob
            action_loss *= alive_masks.unsqueeze(-1)
        else:
            # log_prob: [(batch_size*n) * 1]
            log_prob = multinomials_log_density(actions, log_p_a)
            action_loss = -advantages.view(-1) * log_prob.squeeze()
            action_loss *= alive_masks

        action_loss = action_loss.sum()
        stat['action_loss'] = action_loss.item()

        # value loss term
        targets = returns
        value_loss = (values - targets).pow(2).view(-1)
        value_loss *= alive_masks
        value_loss = value_loss.sum()

        stat['value_loss'] = value_loss.item()
        loss = action_loss + self.args.value_coeff * value_loss

        # entropy regularization term
        entropy = 0
        for i in range(len(log_p_a)):
            entropy -= (log_p_a[i] * log_p_a[i].exp()).sum()
        stat['entropy'] = entropy.item()
        if self.args.entr > 0:
            loss -= self.args.entr * entropy

        # Rank regularization: concatenate the per-step attention of the
        # first and second regularized layers across the batch.
        attention_mid = list(zip(*batch.attention))
        attention1 = [torch.cat(atten, dim=0) for atten in attention_mid[:1]]
        attention2 = [torch.cat(atten, dim=0) for atten in attention_mid[1:2]]
        if self.args.method == 'magicLRR':
            rank_to_high1 = self.high_TNN_loss(attention1[0], mode=self.args.TNN_mode1)
        elif self.args.method in ('magicSubschedulerLRR', 'magicTNNLRR', 'STNNR'):
            rank_to_high1 = self.high_TNN_loss(attention1[0], mode='Softmax')
        else:
            # Bug fix: the original evaluated `NotImplementedError` without
            # raising it, which later crashed with a NameError on
            # rank_to_high1. Fail loudly for unsupported methods.
            raise NotImplementedError(self.args.method)

        if self.args.method == 'magicLRR':
            rank_to_high2 = self.high_TNN_loss(attention2[0], mode=self.args.TNN_mode2)
        elif self.args.method in ('magicSubschedulerLRR', 'magicTNNLRR', 'STNNR'):
            rank_to_high2 = self.high_TNN_loss(attention2[0], mode='Softmax')
        else:
            raise NotImplementedError(self.args.method)

        # Rescale each regularizer so that its magnitude is a fixed fraction
        # (high_rank_coeff) of the current loss; subtracting it rewards
        # higher-rank attention.
        learning_rate1 = abs((loss.item() * self.args.high_rank_coeff1) / rank_to_high1.item())
        loss -= learning_rate1 * rank_to_high1
        learning_rate2 = abs((loss.item() * self.args.high_rank_coeff2) / rank_to_high2.item())
        loss -= learning_rate2 * rank_to_high2
        if self.args.method == 'magicLRR_3layers':
            # NOTE(review): unreachable -- this method raises
            # NotImplementedError above, and rank_to_high3 is never defined
            # in the current code. Left for reference; confirm before use.
            loss -= self.args.high_rank_coeff3 * rank_to_high3
        loss.backward()
        stat['rank_to_high1'] = rank_to_high1.item()
        stat['rank_to_high2'] = rank_to_high2.item()

        return stat

    def high_rank_loss(self, batch_attention_matrix, num_regulated_head):
        """Sum of per-head matrix nuclear norms, averaged over heads.

        Args:
            batch_attention_matrix: batch of attention tensors; each entry is
                indexed as [N, N, num_heads] along its last dimension.
            num_regulated_head: number of leading heads to regularize.

        Returns:
            scalar tensor: sum over the batch of the mean per-head nuclear
            norm (0 contribution for heads where SVD fails to converge).
        """
        nuclear_average_head_to_be_divided = 0
        for attention_matrix in batch_attention_matrix:
            nuclear_total_head = 0
            for head_index in range(num_regulated_head):
                try:
                    _, sigma, _ = torch.svd(attention_matrix[:, :, head_index], some=True)
                    nuclear = torch.sum(sigma)
                except RuntimeError:
                    # torch.svd raises RuntimeError when it fails to
                    # converge; skip the head rather than crash. (Was a bare
                    # `except:`, which also swallowed KeyboardInterrupt.)
                    nuclear = 0
                nuclear_total_head += nuclear
            nuclear_average_head_to_be_divided += nuclear_total_head/num_regulated_head
        return nuclear_average_head_to_be_divided

    def high_TNN_loss(self, batch_attention_tensor, mode):
        """Sum of tensor nuclear norms over a batch of 3-way tensors.

        Args:
            batch_attention_tensor: iterable of 3-way attention tensors.
            mode: 'FFT' or 'Softmax' -- which TNN variant to use.

        Raises:
            NotImplementedError: for any other mode.
        """
        total_batch_nuclear = 0
        for attention_tensor in batch_attention_tensor:
            if mode == 'FFT':
                nuclear = self.calculate_FFT_TNN(attention_tensor)
            elif mode == 'Softmax':
                nuclear = self.calculate_Softmax_TNN(attention_tensor)
            else:
                raise NotImplementedError
            total_batch_nuclear += nuclear
        return total_batch_nuclear

    def calculate_FFT_TNN(self, three_way_tensor):
        """Tensor nuclear norm via FFT along the third mode.

        FFTs the tensor along dim 2 (padded/truncated to
        last_dim * args.fft_times frequencies), stacks the frontal slices
        into a block-diagonal matrix, and returns the mean of that matrix's
        singular-value sum over the slices.
        """
        third_dimension_after_FFT = three_way_tensor.size()[-1] * self.args.fft_times
        tensor_A = torch.fft.fftn(three_way_tensor, third_dimension_after_FFT, dim=2)
        A_ = tensor_A[:, :, 0]
        for i in range(1, third_dimension_after_FFT):
            A_ = torch.block_diag(A_, tensor_A[:, :, i])
        _, sigma, _ = torch.svd(A_, some=True)
        nuclear = torch.sum(sigma)
        # Attention: this line is not same with function in magicSubschedulerLRR.py
        return nuclear/third_dimension_after_FFT

    def calculate_Softmax_TNN(self, three_way_tensor):
        """Tensor nuclear norm of the softmax-normalized tensor (no FFT).

        Applies softmax over the last dimension, block-diagonalizes the
        frontal slices, and returns the mean singular-value sum.
        NOTE(review): iterates `third_dimension_after_FFT` slices even
        though no FFT padding is applied -- assumes fft_times == 1 here;
        confirm for other values.
        """
        three_way_tensor = three_way_tensor.softmax(-1)
        third_dimension_after_FFT = three_way_tensor.size()[-1] * self.args.fft_times
        tensor_A = three_way_tensor
        A_ = tensor_A[:, :, 0]
        for i in range(1, third_dimension_after_FFT):
            A_ = torch.block_diag(A_, tensor_A[:, :, i])
        _, sigma, _ = torch.svd(A_, some=True)
        nuclear = torch.sum(sigma)
        # Attention: this line is not same with function in magicSubschedulerLRR.py
        return nuclear/third_dimension_after_FFT

    def calculate_FFT_SNN(self, three_way_tensor, p=0.5):
        """Schatten-p norm variant of the FFT tensor nuclear norm.

        Same block-diagonal FFT construction as calculate_FFT_TNN, but
        aggregates singular values with a Schatten-p norm
        (sum(sigma**p)**(1/p)) instead of their plain sum.
        """
        third_dimension_after_FFT = three_way_tensor.size()[-1] * self.args.fft_times
        tensor_A = torch.fft.fftn(three_way_tensor, third_dimension_after_FFT, dim=2)
        A_ = tensor_A[:, :, 0]
        for i in range(1, third_dimension_after_FFT):
            A_ = torch.block_diag(A_, tensor_A[:, :, i])
        _, sigma, _ = torch.svd(A_, some=True)
        nuclear = torch.pow(torch.sum(torch.pow(sigma, p)), 1/p)
        return nuclear/third_dimension_after_FFT

    def calculate_Tucker_SNN(self, three_way_tensor):
        """Sum of SNNs of the three Tucker mode-unfoldings of the tensor."""
        mode0 = ten2mat(three_way_tensor, 0)
        mode1 = ten2mat(three_way_tensor, 1)
        mode2 = ten2mat(three_way_tensor, 2)
        return SNN(mode0) + SNN(mode1) + SNN(mode2)
