#!/usr/bin/env python

import torch
from torch.functional import norm
from torch.nn import Parameter
from torch_geometric.nn.inits import glorot, normal, zeros, ones
import torch.nn.functional as F

from GNNSwitch.nn.functional.custom import gcn_propagate
from .nn.functional import PyOldGCN as OPyGCN
from .nn.functional import PyFusedLinearGCN as PyLGCN
from .nn.functional import PyChunkGCN
from .nn.functional import PyGCOOGCN

USE_REVERSE_GRAPH = False

class GCNConv(torch.nn.Module):
   """GCN layer backed by custom CSR kernels (``torch.ops.helpers`` + ``gcn_propagate``).

   The dense linear transform is applied on whichever side of the sparse
   aggregation has the smaller feature width (``in_channels > out_channels``
   means transform first), so the propagation kernel always sees the
   narrower feature matrix.

   Args:
      in_channels:  input feature dimension.
      out_channels: output feature dimension.
      normalize:    apply symmetric GCN edge-weight normalization.
      use_cache:    write normalized edge weights back into the graph once
                    and skip renormalization on later calls.
      with_bias:    add a learnable bias after aggregation.
   """

   def __init__(self, in_channels, out_channels,
                normalize=True, use_cache=True, with_bias=True):
      super(GCNConv, self).__init__()
      self.op = torch.ops.helpers
      self.normalize = normalize
      self.use_cache = use_cache
      self.cached = False  # becomes True after norms are written back to the graph
      # Transform before aggregation only when it shrinks the feature width.
      self.linear_first = in_channels > out_channels

      self.linear_weight = Parameter(torch.Tensor(out_channels, in_channels))
      if with_bias:
         self.linear_bias = Parameter(torch.Tensor(out_channels))
      else:
         self.register_parameter('linear_bias', None)

      self.reset_parameter()

   def reset_parameter(self):
      """Glorot-init the weight; zero the bias when present."""
      glorot(self.linear_weight)
      if self.linear_bias is not None:
         zeros(self.linear_bias)

   def forward(self, x, graph):
      if self.linear_first:
         msg = F.linear(x, self.linear_weight)
      else:
         msg = x

      if self.normalize and not self.cached:
         # Symmetric GCN normalization, computed for both the forward and
         # the reverse CSR of the graph.
         ew_norm = self.op.gcn_norm_csr(graph.row_offset,
                                        graph.adj_list,
                                        graph.odegree,
                                        graph.edge_val)
         r_ew_norm = self.op.gcn_norm_csr(graph.r_row_offset,
                                          graph.r_adj_list,
                                          graph.idegree,
                                          graph.r_edge_val)
         if self.use_cache:
            # Side effect: the normalized weights are written back INTO the
            # graph object so gcn_propagate (which reads graph.edge_val)
            # picks them up, and later calls skip renormalization.
            self.cached = True
            graph.edge_val = ew_norm
            graph.r_edge_val = r_ew_norm
         # NOTE(review): when use_cache is False the freshly computed norms
         # are discarded and gcn_propagate still sees the unnormalized
         # graph.edge_val -- confirm this is intentional.

      out_ = gcn_propagate(graph, msg)

      if not self.linear_first:
         out = F.linear(out_, self.linear_weight)
      else:
         out = out_

      if self.linear_bias is not None:
         out += self.linear_bias
      return out

   def __repr__(self) -> str:
      # Guard the no-bias case: self.linear_bias is None when with_bias=False.
      bias = self.linear_bias.size() if self.linear_bias is not None else None
      return f"{self.__class__.__name__}(weight={self.linear_weight.size()}, bias={bias})"


class GCOOConv(torch.nn.Module):
   """GCN layer over a grouped-COO graph representation (``PyGCOOGCN`` kernel).

   Mirrors GCNConv's linear-first heuristic: the dense transform runs on
   the smaller feature side of the sparse aggregation.

   Args:
      in_channels:  input feature dimension.
      out_channels: output feature dimension.
      normalize:    kept for interface parity with GCNConv.
      use_cache:    compute COO edge-weight norms once and reuse them.
      with_bias:    add a learnable bias after aggregation.
   """

   def __init__(self, in_channels, out_channels, normalize=True, use_cache=True, with_bias=True):
      super(GCOOConv, self).__init__()
      self.op = torch.ops.helpers
      self.normalize = normalize
      self.use_cache = use_cache
      # Fixed: was misspelled 'self.cahced', so the first forward() raised
      # AttributeError when it read the never-initialized self.cached.
      self.cached = False
      self.linear_first = in_channels > out_channels

      self.linear_weight = Parameter(torch.Tensor(out_channels, in_channels))

      if with_bias:
         self.linear_bias = Parameter(torch.Tensor(out_channels))
      else:
         self.register_parameter('linear_bias', None)

      self.reset_parameter()

   def reset_parameter(self):
      """Glorot-init the weight; zero the bias when present."""
      glorot(self.linear_weight)
      if self.linear_bias is not None:
         zeros(self.linear_bias)

   def forward(self, x, graph):
      if self.linear_first:
         msg = F.linear(x, self.linear_weight)
      else:
         msg = x

      # NOTE(review): unlike GCNConv this branches on use_cache rather than
      # self.normalize (which is never consulted here) -- confirm intended.
      if self.use_cache:
         if not self.cached:
            # Compute and memoize the normalized COO edge weights.
            self.ew_norm = self.op.gcn_norm_coo(graph.edge_list, graph.weight_list)
            self.cached = True
         ew_norm = self.ew_norm
      else:
         ew_norm = graph.weight_list

      out_ = PyGCOOGCN(msg, graph.edge_list, ew_norm, graph.grp_offset)

      if not self.linear_first:
         out = F.linear(out_, self.linear_weight)
      else:
         out = out_

      if self.linear_bias is not None:
         out += self.linear_bias

      return out

   def __repr__(self) -> str:
      # Guard the no-bias case: self.linear_bias is None when with_bias=False.
      bias = self.linear_bias.size() if self.linear_bias is not None else None
      return f"{self.__class__.__name__}(weight={self.linear_weight.size()}, bias={bias})"
   

class FGCNConv(torch.nn.Module):
   """Fused linear+GCN layer: the ``PyLGCN`` kernel performs the linear
   transform, propagation, and bias add in a single call.

   Args:
      in_channels:  input feature dimension.
      out_channels: output feature dimension.
      normalize:    apply symmetric GCN edge-weight normalization.
      use_cache:    memoize normalized edge weights on the module.
      with_bias:    add a learnable bias (passed into the fused kernel).
   """

   def __init__(self, in_channels, out_channels,
                normalize=True, use_cache=True, with_bias=True):
      super(FGCNConv, self).__init__()
      self.op = torch.ops.helpers

      self.normalize = normalize
      self.use_cache = use_cache
      self.cached = False  # becomes True once norms are memoized on the module

      self.linear_weight = Parameter(torch.Tensor(out_channels, in_channels))
      if with_bias:
         self.linear_bias = Parameter(torch.Tensor(out_channels))
      else:
         self.register_parameter('linear_bias', None)

      self.reset_parameter()

   def reset_parameter(self):
      """Glorot-init the weight; zero the bias when present."""
      glorot(self.linear_weight)
      if self.linear_bias is not None:
         zeros(self.linear_bias)

   def forward(self, x, graph):
      if self.normalize:
         if not self.cached:
            # Symmetric GCN normalization for the forward and reverse CSR.
            ew_norm = self.op.gcn_norm_csr(graph.row_offset, graph.adj_list, graph.odegree, graph.edge_val)
            r_ew_norm = self.op.gcn_norm_csr(graph.r_row_offset, graph.r_adj_list, graph.idegree, graph.r_edge_val)
            if self.use_cache:
               # Unlike GCNConv, the norms are cached on the module itself
               # rather than written back into the graph.
               self.cached = True
               self.ew_norm = ew_norm
               self.r_ew_norm = r_ew_norm
         else:
            ew_norm = self.ew_norm
            r_ew_norm = self.r_ew_norm
      else:
         ew_norm = graph.edge_val
         r_ew_norm = graph.r_edge_val

      # Fused kernel: linear transform + propagation + bias in one call.
      out = PyLGCN(graph.row_offset, graph.adj_list, ew_norm,
                   graph.r_row_offset, graph.r_adj_list, r_ew_norm,
                   x, self.linear_weight, self.linear_bias)
      return out

   def __repr__(self) -> str:
      # Guard the no-bias case: self.linear_bias is None when with_bias=False.
      bias = self.linear_bias.size() if self.linear_bias is not None else None
      return f"{self.__class__.__name__}(weight={self.linear_weight.size()}, bias={bias})"



class ChGCNConv(torch.nn.Module):
   """GCN layer over a chunked graph layout, executed by the ``PyChunkGCN``
   kernel. The linear transform always runs before propagation.

   Args:
      in_channels:  input feature dimension.
      out_channels: output feature dimension.
      with_bias:    add a learnable bias after aggregation.
   """

   def __init__(self, in_channels, out_channels, with_bias=True):
      super(ChGCNConv, self).__init__()

      self.linear_weight = Parameter(torch.Tensor(out_channels, in_channels))
      if with_bias:
         self.linear_bias = Parameter(torch.Tensor(out_channels))
      else:
         self.register_parameter('linear_bias', None)

      self.reset_parameter()

   def reset_parameter(self):
      """Glorot-init the weight; zero the bias when present."""
      glorot(self.linear_weight)
      if self.linear_bias is not None:
         zeros(self.linear_bias)

   def forward(self, x, chg):
      # Dense transform first, then chunked propagation over the dense,
      # heavy, and lite chunk partitions carried by `chg`.
      msg = F.linear(x, self.linear_weight)
      out = PyChunkGCN(msg, chg.blockOffsetSdat, chg.blockOffsetEdat, chg.blockOffsetSb, chg.heavyTiledEdge, chg.heavyTiledWeight,
                       chg.blockOffsetSbDn, chg.denseMatDat,
                       chg.blockOffsetElst, chg.liteSrcLst, chg.liteDstPosSb, chg.liteWeightLst,
                       chg.blockOffsetDlst, chg.aggDst,
                       chg.denseChunks, chg.heavyChunks, chg.liteChunks, chg.strmBufSize)

      if self.linear_bias is not None:
         out += self.linear_bias
      return out

   def __repr__(self) -> str:
      # Guard the no-bias case: self.linear_bias is None when with_bias=False.
      bias = self.linear_bias.size() if self.linear_bias is not None else None
      return f"{self.__class__.__name__}(weight={self.linear_weight.size()}, bias={bias})"