from functools import reduce
from math import sqrt
from typing import Callable, Tuple

import torch
from torch._C import device
from torch.autograd.function import Function
from torch.distributions import Categorical
from torch.nn.modules.container import ModuleList
from torch.nn.modules.linear import Linear
import torch.optim
import torch.nn
from torch.nn.functional import linear
from torch.functional import Tensor
from torch.nn import Module
from torch.optim import Optimizer
from torch.nn.parameter import Parameter
from projector import Projector
from torch.nn.init import xavier_normal_

class Head(Module):
    """
    During the forward pass it takes a random convex combination of heads before doing the multiplication. This is similar to the 
    REM algorithm from the 'An Optimistic Perspective on Offline Reinforcement Learning' except here I am using it with categorical
    probability vectors.

    Unlike for a linear layer, the output is a 3d (batch, action, support) tensor.
    """
    def __init__(self, dim_input, dim_action, dim_support, dim_head=8, track_errors=False):
        super().__init__()
        self.track_errors = track_errors
        self.dim_head = dim_head
        self.dim_action = dim_action
        self.dim_support = dim_support
        # One (out, in) weight matrix per ensemble head, stacked along the last axis.
        # The gain passed to xavier_normal_ is sqrt(dim_head) so each head's scale compensates for the averaging/mixing over heads.
        self.weight = Parameter(xavier_normal_(torch.empty(dim_action * dim_support, dim_input, dim_head), sqrt(dim_head)))
        self.bias = Parameter(torch.zeros(dim_action * dim_support, dim_head))
        if track_errors:
            # Running accumulators for RMS-error reporting; excluded from gradient updates.
            self.square_error = Parameter(torch.scalar_tensor(0.0), requires_grad=False)
            self.square_error_count = Parameter(torch.scalar_tensor(0.0), requires_grad=False)

    def mse_clear(self):
        """Reset the tracked error accumulators to zero."""
        self.square_error.fill_(0.0)
        self.square_error_count.fill_(0.0)

    @property
    def mse(self):
        """Root of the mean tracked squared error."""
        return (self.square_error / self.square_error_count).sqrt_()

    @property
    def mse_and_clear(self):
        """Report the current RMS error, then reset the accumulators."""
        result = self.mse
        self.mse_clear()
        return result

    def forward(self, x: Tensor):
        if self.dim_head == 1:
            # A single head needs no mixing; drop the head axis.
            weight, bias = self.weight.squeeze(-1), self.bias.squeeze(-1)
        elif self.training:
            # Random convex combination of the ensemble heads (REM-style).
            mix = torch.rand(self.dim_head, device=x.device)
            mix /= mix.sum(-1, keepdim=True)
            weight = torch.einsum('oih,h->oi', self.weight, mix)
            bias = torch.einsum('oh,h->o', self.bias, mix)
        else:
            # At evaluation time use the plain average over heads.
            weight, bias = self.weight.mean(-1), self.bias.mean(-1)
        return linear(x, weight, bias).view(-1, self.dim_action, self.dim_support)

class SignSGD(Optimizer):
    """
    Does a step in the direction of the sign of the gradient.

    Args:
        params: iterable of parameters (or param groups) to optimize.
        lr: step size applied to the sign of the (momentum-smoothed) gradient.
        momentum_decay: decay factor for the gradient running sum; 0.0 disables momentum.
        weight_decay: multiplicative shrink applied to every parameter each step
            (1.0 means no decay). Note this is applied even to parameters with no gradient.
    """
    def __init__(self,params,lr=2 ** -10,momentum_decay=0.0,weight_decay=1.0):
        super().__init__(params,dict(lr=lr,momentum_decay=momentum_decay,weight_decay=weight_decay))

    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and returns the loss,
                per the standard ``Optimizer.step`` contract.

        Returns:
            The loss returned by ``closure``, or ``None`` if no closure was given.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for gr in self.param_groups:
            momentum_decay, lr, weight_decay = gr['momentum_decay'], gr['lr'], gr['weight_decay']

            for x in gr['params']:
                # Multiplicative weight decay, applied before the update.
                if weight_decay != 1.0: x *= weight_decay
                if x.grad is not None:
                    if momentum_decay == 0.0: # Self play works better without momentum.
                        x -= torch.sign(x.grad).mul_(lr)
                    else:
                        # Lazily create the per-parameter momentum buffer on first use.
                        x_state = self.state.get(x)
                        if x_state is None: x_state = {}; self.state[x] = x_state
                        m = x_state.get('momentum_params')
                        if m is None: m = torch.zeros_like(x); x_state['momentum_params'] = m
                        m *= momentum_decay; m += x.grad
                        x -= torch.sign(m).mul_(lr)
        return loss

def normed(o : Tensor,dim= -1):
    """Scale `o` to sum to one along `dim`; the denominator is clamped to avoid division by zero."""
    denom = o.sum(dim=dim, keepdim=True).clamp_min(1e-38)
    return o / denom

def normed_abs(x : Tensor,dim=- 1):
    """Probability vector proportional to |x| along `dim`."""
    return normed(x.abs(), dim)

def normed_abs_cube(x : Tensor,dim= -1):
    """Probability vector proportional to |x^3| along `dim`."""
    return normed((x ** 3).abs(), dim)

def normed_square(x : Tensor,dim= -1):
    """Probability vector proportional to x^2 along `dim`."""
    return normed(x.square(), dim)

def inf(o : Tensor,dim= -1) -> Tensor:
    """Scale `o` to unit infinity norm along `dim`; the denominator is clamped to avoid division by zero."""
    denom = o.norm(p=float('inf'), dim=dim, keepdim=True).clamp_min(1e-38)
    return o / denom

def inf_cube(x : Tensor,dim= -1):
    """Cube `x`, then scale the result to unit infinity norm along `dim`."""
    return inf(x ** 3, dim)

class InfCube(Linear):
    """Linear layer whose output is cubed and rescaled to unit infinity norm (see `inf_cube`)."""
    def forward(self, x):
        pre = super().forward(x)
        return inf_cube(pre)

class ResInfCube(InfCube):
    """Square `InfCube` layer (same input and output width) with a residual connection."""
    def __init__(self, size):
        super().__init__(size, size)

    def forward(self, x):
        out = super().forward(x)
        return x + out

class DenseInfCube(Linear):
    """Concatenates the inf-cube transform of the input with the raw input along dim 1.

    NOTE(review): `forward` never calls `super().forward`, so the inherited Linear
    weights are unused — confirm this is intentional.
    """
    def forward(self, x):
        transformed = inf_cube(x)
        return torch.cat((transformed, x), 1)

class Rotary(Module):
    """Rotary positional embedding (RoPE-style) over a last dimension of size `dim_in`."""
    def __init__(self, dim_in):
        super().__init__()
        self.dim_in = dim_in
        # Frozen frequency table; filled in place right below.
        self.freq = Parameter(torch.empty(dim_in), requires_grad=False)
        base = 10000 ** torch.arange(0, -1, -2 / dim_in, dtype=torch.float)
        # Writing the duplicated frequency pattern straight into the parameter with
        # `out=` avoids memory fragmentation when initing the embedding.
        torch.cat((base, base[:base.shape[0] - self.dim_in % 2]), out=self.freq)

    def make_emb(self, dim_seq):
        """Return (cos, sin) tables of shape (1, dim_seq, 1, dim_in) for a sequence of length `dim_seq`."""
        positions = torch.arange(dim_seq, dtype=self.freq.dtype, device=self.freq.device)
        angles = (positions.view(-1, 1) * self.freq.view(1, -1)).view(1, dim_seq, 1, self.dim_in)
        return angles.cos(), angles.sin()

    def rotate_half(self, x: Tensor):
        """Swap the two halves of the last dimension, negating the (new) first half."""
        first, second = x.chunk(2, -1)
        return torch.cat((-second, first), dim=-1)

    def forward(self, x, emb_cos, emb_sin):
        """Apply the rotation using precomputed tables from `make_emb`."""
        return x * emb_cos + self.rotate_half(x) * emb_sin

class TopEncoderBase(Module):
    """Attention pooling layer: the first sequence element queries the whole sequence
    and the result is flattened to a single (batch, dim_head*dim_emb) vector.

    The rotary transform is injected as a callable so positional tables can be shared.
    """
    def __init__(self, dim_in, dim_head=2 ** 3, dim_emb=2 ** 5):
        super().__init__()
        self.dim_in, self.dim_head, self.dim_emb = dim_in, dim_head, dim_emb
        self.proj_q = Linear(dim_in, dim_head * dim_emb)
        self.proj_kv = Linear(dim_in, 2 * dim_head * dim_emb)

    def forward(self, x, rotary, mask=None) -> Tensor:
        batch, seq, _ = x.shape
        heads, emb = self.dim_head, self.dim_emb
        kv: Tensor = self.proj_kv(x).view(batch, seq, 2, heads, emb)
        # Query comes from the first sequence position only; rotary is applied to keys.
        q = self.proj_q(x[:, 0]).view(batch, heads, emb)
        k = rotary(kv[:, :, 0])
        v = inf_cube(kv[:, :, 1])
        att: Tensor = torch.einsum('zhe,zkhe->zkh', q, k)
        if mask is not None:
            # Masked positions get zero weight before the square-normalization.
            att = att.masked_fill(mask.view(batch, seq, 1), 0.0)
        pooled = torch.einsum('zkh,zkhe->zhe', normed_square(att, -2), v)
        return pooled.reshape(batch, heads * emb)

class TopEncoder(TopEncoderBase):
    """`TopEncoderBase` bundled with its own rotary positional embedding."""
    def __init__(self, dim_in, dim_head=2 ** 3, dim_emb=2 ** 5):
        super().__init__(dim_in, dim_head, dim_emb)
        self.rotary = Rotary(dim_emb)

    def forward(self, x, mask=None):
        # Build the positional tables once per call and close over them.
        tables = self.rotary.make_emb(x.shape[1])
        def rot(t): return self.rotary(t, *tables)
        return super().forward(x, rot, mask)

class EncoderBase(Module):
    """Self-attention layer producing a (batch, seq, dim_head*dim_emb) output.

    Queries and keys are rotated by the injected `rotary` callable; attention weights
    use square-normalization rather than softmax.
    """
    def __init__(self, dim_in, dim_head=2 ** 3, dim_emb=2 ** 5):
        super().__init__()
        self.dim_in, self.dim_head, self.dim_emb = dim_in, dim_head, dim_emb
        self.proj_in = Linear(dim_in, 3 * dim_head * dim_emb)

    def forward(self, x, rotary, mask=None) -> Tensor:
        batch, seq, _ = x.shape
        heads, emb = self.dim_head, self.dim_emb
        qkv: Tensor = self.proj_in(x).view(batch, seq, 3, heads, emb)
        q = rotary(qkv[:, :, 0])
        k = rotary(qkv[:, :, 1])
        v = inf_cube(qkv[:, :, 2])
        att: Tensor = torch.einsum('zqhe,zkhe->zqkh', q, k)
        if mask is not None:
            # Zero out weights whenever either the query or the key position is masked.
            pad = torch.logical_or(mask.view(batch, seq, 1, 1), mask.view(batch, 1, seq, 1))
            att = att.masked_fill(pad, 0.0)
        out = torch.einsum('zqkh,zkhe->zqhe', normed_square(att, -2), v)
        return out.reshape(batch, seq, heads * emb)

class Encoder(EncoderBase):
    """`EncoderBase` bundled with its own rotary positional embedding."""
    def __init__(self, dim_in, dim_head=2 ** 3, dim_emb=2 ** 5):
        super().__init__(dim_in, dim_head, dim_emb)
        self.rotary = Rotary(dim_emb)

    def forward(self, x, mask=None):
        # Build the positional tables once per call and close over them.
        tables = self.rotary.make_emb(x.shape[1])
        def rot(t): return self.rotary(t, *tables)
        return super().forward(x, rot, mask)

class ResEncoderBase(EncoderBase):
    """Square self-attention block (input width equals output width `dim_head*dim_emb`)
    with a residual connection."""
    def __init__(self, dim_head=2 ** 3, dim_emb=2 ** 5):
        super().__init__(dim_head * dim_emb, dim_head, dim_emb)

    def forward(self, x, rotary, mask=None):
        out = super().forward(x, rotary, mask)
        return x + out

class EncoderList(Module):
    """Stack of residual self-attention blocks, optionally preceded by an input
    projection layer, followed by an attention-pooling top layer. All layers share
    one rotary embedding."""
    def __init__(self, depth, dim_head=2 ** 3, dim_emb=2 ** 5, dim_in=None, dim_top_head=None):
        super().__init__()
        self.rotary = Rotary(dim_emb)
        # An initial projection is only needed when the input width differs from dim_head*dim_emb.
        self.initial = EncoderBase(dim_in, dim_head, dim_emb) if dim_in is not None else None
        self.layers = ModuleList([ResEncoderBase(dim_head, dim_emb) for _ in range(depth)])
        top_heads = dim_head if dim_top_head is None else dim_top_head
        self.top = TopEncoderBase(dim_head * dim_emb, top_heads, dim_emb)

    def forward(self, x, mask=None):
        tables = self.rotary.make_emb(x.shape[1])
        def rot(t): return self.rotary(t, *tables)
        if self.initial is not None:
            x = self.initial(x, rot, mask)
        for layer in self.layers:
            x = layer(x, rot, mask)
        return self.top(x, rot, mask)

class CatPow(Function):
    """
    Numerically stable alternative of log_softmax for absolute normalized powers.

    forward(q, e, dim) computes ``(|q|**e / sum(|q|**e, dim)) ** (1/e)``, i.e. the
    e-th root of the e-power-normalized absolute values, which equals
    ``|q| / ||q||_e``. Unlike composing `normed`-style helpers, the hand-written
    backward stays well defined when some probabilities are exactly zero.
    """

    @staticmethod
    def forward(ctx, qinit : Tensor, e : "int | float", dim=-1):
        # NOTE: the annotation used to be `int or float`, which evaluates to `int`;
        # a string annotation documents the intended union without a typing import.
        assert isinstance(e,float) or isinstance(e,int), "Expected the exponent to be a constant float or integer."
        q = qinit.abs()
        qp = q.pow(e)
        qpsum = qp.sum(dim,keepdim=True)
        # The exponent and dim are compile-time constants; only tensors go through save_for_backward.
        ctx.e, ctx.dim = e, dim
        ctx.save_for_backward(qinit,q,qpsum)
        return (qp / qpsum).pow(1/e)

    @staticmethod
    def backward(ctx, g_qn):
        qinit,q,qpsum = ctx.saved_tensors
        e,dim = ctx.e, ctx.dim
        g_q = None
        if ctx.needs_input_grad[0]:
            # d out_i/d q_j = S^(-1/e) * (delta_ij - q_i * q_j^(e-1) / S) with S = sum |q|^e,
            # chained with d|q|/d qinit = sign(qinit). The e == 2 branch avoids a pow call.
            g_q = qinit.sign().mul_(g_qn - (q if e == 2 else q.pow(e-1)) * ((g_qn * q).sum(dim,keepdim=True) / qpsum)).div_(qpsum.pow(1/e))
        # Gradients for the non-tensor arguments `e` and `dim` do not exist.
        return g_q, None

catpow : Callable[[Tensor,int],Tensor] = CatPow().apply

def sample_value_probs(proj : Projector, value_probs : Tensor):
    """Sample one support index per categorical distribution in `value_probs` and
    map each index to its value on `proj.support`, preserving the batch shape."""
    idx = Categorical(value_probs).sample()
    return proj.support[idx.flatten()].view(idx.shape)

def model_explore(
        proj : Projector, value : Module, value_head : Head, policy : Module,policy_head : Linear,
        is_update_value : bool, is_update_policy : bool,
        policy_data : Tensor, policy_mask : Tensor, value_data : Tensor, value_mask : Tensor, action_mask : Tensor, pids : Tensor
        ):
    """
    Does SARSA with categorical distributions.

    Returns (None, sampled action indices as a numpy array, `update` closure). Actions are
    sampled uniformly among the legal (unmasked) ones, so this routine explores rather than
    following the current policy. The `update` closure consumes per-step `rewards` and
    `regret_probs` weights, runs the requested backward passes, and returns the bootstrapped
    value distribution for the previous step.
    """
    # Uniform sampling over legal actions: random scores with illegal actions forced to -inf, then argmax.
    sample_indices = torch.masked_fill(torch.normal(0.0,1.0,action_mask.shape),action_mask,float('-inf')).argmax(-1)
    def update(rewards : Tensor, regret_probs : Tensor):
        nonlocal value_data, value_mask, policy_data, policy_mask, action_mask, pids, sample_indices
        # NOTE(review): tensors are moved to the default CUDA device unconditionally — assumes a GPU is present.
        sample_indices = sample_indices.cuda()
        value_data, value_mask, action_mask, pids = value_data.cuda(), value_mask.cuda(), action_mask.cuda(), pids.cuda().view(-1,1)
        value_raw : Tensor = value(value_data,mask=value_mask)
        value_head_raw = value_head.forward(value_raw)
        # Square-normalized head outputs form the categorical value distribution; detached for bootstrapping.
        value_probs = normed_square(value_head_raw).detach()

        policy_data, policy_mask = policy_data.cuda(), policy_mask.cuda()
        # Illegal actions are zeroed before normalization so they get zero probability.
        action_probs = normed_square(torch.masked_fill(policy_head(policy(policy_data,mask=policy_mask)),action_mask,0.0))
        values = proj.mean(value_probs)
        if is_update_value or is_update_policy:
            regret_probs = regret_probs.cuda().view(-1,1)
        if is_update_value:
            # Select the row for the action actually taken in each batch element.
            def index_selected_actions(x : Tensor): return x[torch.arange(0,action_probs.shape[0]),sample_indices]
            if value_head.track_errors:
                # Accumulate the regret-weighted squared error for RMS reporting.
                # `rewards.sum(-1)` presumably acts as a per-sample validity weight — TODO confirm against caller.
                value_head.square_error += (((index_selected_actions(values) - proj.mean(rewards)) * rewards.sum(-1)).square().unsqueeze(-1) * regret_probs).sum()
                value_head.square_error_count += regret_probs.sum()
            # Cubed absolute difference between sqrt-probabilities; see the comment below for the catpow rationale.
            ((catpow(index_selected_actions(value_head_raw),2) - rewards.sqrt()).abs().pow(3) * regret_probs).sum().backward()
            # ((normed_square(index_selected_actions(value_head_raw)).sqrt() - rewards.sqrt()).abs().pow(3) * regret_probs).sum().backward() # These two are equivalent apart from catpow being well defined when distribution probs are 0.
        if is_update_policy:
            # Policy gradient: push action probabilities along the player-signed, regret-weighted action values.
            action_probs.backward(values * -(pids * regret_probs))
        # SARSA bootstrap: expected value distribution under the current action probabilities.
        return torch.einsum('bav,ba->bv',value_probs,action_probs.detach())
        # return torch.zeros_like(rewards) # Only trains on the last step.
    return None, sample_indices.cpu().numpy(), update

def model_exploit(
        proj : Projector, value : Module, value_head : Head, policy : Module,policy_head : Linear,
        is_update_value : bool, is_update_policy : bool,
        policy_data : Tensor, policy_mask : Tensor, value_data : Tensor, value_mask : Tensor, action_mask : Tensor, pids : Tensor
        ):
    """
    Does MC value propagation. Uses the actual policy for exploration.

    Returns (action probabilities as numpy, sampled action indices as numpy, `update` closure).
    The `update` closure consumes `rewards` and `regret_probs`, accumulates the requested
    gradients, and passes `rewards` through unchanged (Monte Carlo propagation).
    """
    # Illegal actions are zeroed before normalization so they receive zero probability.
    policy_data, policy_mask, action_mask = policy_data.cuda(), policy_mask.cuda(), action_mask.cuda()
    action_probs = normed_square(torch.masked_fill(policy_head(policy(policy_data,mask=policy_mask)),action_mask,0.0))
    sample_indices = Categorical(action_probs).sample()
    def update(rewards : Tensor, regret_probs : Tensor): # As long as training is not done, it can be used to propagate scalar reward arrays as well.
        if is_update_value or is_update_policy:
            nonlocal value_data, value_mask, pids
            value_data, value_mask, pids = value_data.cuda(), value_mask.cuda(), pids.cuda().view(-1,1)
            regret_probs = regret_probs.cuda().view(-1,1)
            value_raw : Tensor = value(value_data,mask=value_mask)
            value_head_raw = value_head.forward(value_raw)
            value_probs = value_head_raw.softmax(-1).detach()
            values = proj.mean(value_probs)
            if is_update_value:
                # Select the row for the action actually taken in each batch element.
                def index_selected_actions(x : Tensor): return x[torch.arange(0,values.shape[0]),sample_indices]
                # BUG FIX: the error tracking and the value loss below used to sit outside this
                # `if is_update_value:` block, so a policy-only update raised a NameError on
                # `index_selected_actions` and ran a value backward it was asked to skip.
                # They are now nested to mirror `model_explore`.
                if value_head.track_errors:
                    value_head.square_error += (((index_selected_actions(values) - proj.mean(rewards)) * rewards.sum(-1)).square().unsqueeze(-1) * regret_probs).sum()
                    value_head.square_error_count += regret_probs.sum()
                ((catpow(index_selected_actions(value_head_raw),2) - rewards.sqrt()).abs().pow(3) * regret_probs).sum().backward()
                # ((normed_square(index_selected_actions(value_head_raw)).sqrt() - rewards.sqrt()).abs().pow(3) * regret_probs).sum().backward() # These two are equivalent apart from catpow being well defined when distribution probs are 0.
            if is_update_policy:
                # Policy gradient: push action probabilities along the player-signed, regret-weighted action values.
                action_probs.backward(values * -(pids * regret_probs))
        return rewards
    return action_probs.detach().cpu().numpy(), sample_indices.cpu().numpy(), update