from time import time
import numpy as np
from numpy.core.fromnumeric import repeat
import scipy.io as sio
from torch_scatter import segment_sum_csr
from linear_operator.utils.linear_cg import linear_cg
import torch
from torch._C import dtype
import torch.sparse as ts
from torch_sparse import coalesce, spmm, SparseTensor, SparseStorage
from fast_repeat import fast_repeat
from fast_anchor import fast_anchor
from src.JPreconditioner import *
device = torch.device('cuda')


class Homo3D:
    """3D periodic homogenization on a regular nelx x nely x nelz voxel grid.

    Builds sparse COO stiffness (K) and macrostrain-force (F) systems from
    per-element 24x24 / 24x6 matrices, merges duplicate entries with
    precomputed sort-permutation + CSR-segment-pointer pairs (so repeated
    assemblies during optimization avoid torch's generic ``coalesce``),
    and solves K u = F with linear conjugate gradients.

    Typical call order: ``__init__`` -> ``anchor()`` -> ``indices()`` ->
    ``solve_by_torch()`` / ``MPE_full()`` -> ``homo_new()``.
    """

    # rewritten by penghao

    def __init__(self, nelx, nely, nelz, lx, ly, lz) -> None:
        """Precompute per-element node connectivity with periodic wrap-around.

        Args:
            nelx, nely, nelz (int): element counts along x/y/z.
            lx, ly, lz (float): physical domain lengths along x/y/z.
        """
        # COO index tensors (2, nnz) of the assembled K and F, set by indices().
        self.__K_indices = None
        self.__F_indices = None

        # Per-element value caches (currently unused placeholders).
        # NOTE: the original code assigned each of these twice; the duplicate
        # no-op assignments were removed.
        self.__vK_hard = None
        self.__vF_hard = None

        # Anchor (fixed-node) bookkeeping, filled in by anchor().
        self.__anchor_indices = None
        self.__K_mask = None
        self.__F_mask = None

        # Sort permutation + CSR segment pointers for symcoalesce() on K values.
        self.__K_sortidx = None
        self.__K_ptr = None

        # Sort permutation + CSR segment pointers for coalesce() on F values.
        self.__F_sortidx = None
        self.__F_ptr = None

        self.__lx = lx
        self.__ly = ly
        self.__lz = lz
        self.__nelx = nelx
        self.__nely = nely
        self.__nelz = nelz

        self.__jacobi_coo = None

        nel = nelx * nely * nelz
        # Node index grid, linearized x-major: i = x*nely*nelz + y*nelz + z.
        nodeidx = torch.arange(0, nel, device=device).view(nelx, nely, nelz)

        # Periodic boundary: replicate the first slice of each axis at its
        # end so elements on the +x/+y/+z faces wrap to the opposite face.
        index = torch.as_tensor([0], device=device)
        nodeidx = torch.cat(
            (nodeidx, torch.index_select(nodeidx, 0, index)), 0)
        nodeidx = torch.cat(
            (nodeidx, torch.index_select(nodeidx, 1, index)), 1)
        nodeidx = torch.cat(
            (nodeidx, torch.index_select(nodeidx, 2, index)), 2)

        # The 8 corner nodes of every hexahedral element, in local node order.
        node_list = [nodeidx[0:nelx, 0:nely, 0:nelz].reshape((nel, 1)),
                     nodeidx[1:nelx + 1, 0:nely, 0:nelz].reshape((nel, 1)),
                     nodeidx[1:nelx + 1, 1:nely + 1, 0:nelz].reshape((nel, 1)),
                     nodeidx[0:nelx, 1:nely + 1, 0:nelz].reshape((nel, 1)),
                     nodeidx[0:nelx, 0:nely, 1:nelz + 1].reshape((nel, 1)),
                     nodeidx[1:nelx + 1, 0:nely, 1:nelz + 1].reshape((nel, 1)),
                     nodeidx[1:nelx + 1, 1:nely + 1,
                     1:nelz + 1].reshape((nel, 1)),
                     nodeidx[0:nelx, 1:nely + 1, 1:nelz + 1].reshape((nel, 1))]
        # __cellidx[i] : (3, nel) xyz coordinates of local node i per element.
        # __cellseq    : (nel, 8) linear node index of each element's 8 nodes.
        self.__cellidx = torch.zeros(
            8, 3, nel, device=device, dtype=torch.int64)
        self.__cellseq = torch.zeros(nel, 8, device=device, dtype=torch.int64)
        for i in range(8):
            self.__cellidx[i] = self.index2xyz(node_list[i])
            self.__cellseq[:, i] = node_list[i].view(-1)

        self.__nodeidx = nodeidx

    @torch.no_grad()
    def index2xyz(self, index):
        """Convert linear node indices to (3, n) xyz coordinates.

        Inverts the x-major linearization used in ``__init__``
        (``i = x*nely*nelz + y*nelz + z``, from ``arange(nel).view(nelx,
        nely, nelz)``).

        Args:
            index: (n, 1) integer tensor of linear node indices.

        Returns:
            (3, n) tensor of x/y/z coordinates.
        """
        x = index // (self.__nely * self.__nelz)
        temp = index % (self.__nely * self.__nelz)
        # BUGFIX: the inner two axes must be decomposed with nelz (the
        # innermost axis length), not nely; the original code was only
        # correct when nely == nelz (cubic grids).
        y = temp // self.__nelz
        z = temp % self.__nelz
        xyz = torch.cat((x, y, z), 1)
        return xyz.t()

    @torch.no_grad()
    def anchor(self, index=0):
        """Choose the anchor node of element ``index`` and build the masks
        that zero out the anchored DOFs of the per-element K and F.

        Stores ``__anchor_indices`` (elements touching the anchor node),
        ``__K_mask`` (per-element 24x24 multiplier) and ``__F_mask``
        (per-element 24x6 multiplier) for later use by ``full_assembly()``.
        """
        anchor = self.__nodeidx[self.__cellidx[index, 0, 0],
                                self.__cellidx[index, 1, 0], self.__cellidx[index, 2, 0]]

        # Which local node of which element coincides with the anchor node.
        mask = torch.eq(self.__cellseq, anchor)

        anchor_index = torch.arange(
            0, self.__nelx*self.__nely*self.__nelz, device=anchor.device)
        anchor_cell = anchor_index.masked_select(mask.sum(1).type(torch.bool))
        anchor_mask = mask[anchor_cell, :]

        if anchor_mask.dim() == 1:
            anchor_mask.unsqueeze_(0)

        # Expand the 8-node mask to the 24 element DOFs (3 per node) and
        # negate: 1.0 marks free DOFs, 0.0 marks anchored DOFs.
        anchor_mask = ~anchor_mask.unsqueeze(2).repeat(
            1, 1, 3).reshape(-1, 24).unsqueeze(2)
        anchor_mask = torch.as_tensor(
            anchor_mask, dtype=torch.float64, device=anchor_cell.device)
        # K_mask keeps the free-DOF rows x columns; OR-ing the identity back
        # in retains the anchored diagonal entries so K stays non-singular.
        K_mask = anchor_mask.bmm(anchor_mask.transpose(1, 2))
        K_diag = torch.eye(24, 24, dtype=K_mask.dtype, device=K_mask.device).unsqueeze(
            0).repeat(K_mask.shape[0], 1, 1)
        K_mask = torch.logical_or(K_mask, K_diag)
        F_mask = anchor_mask.repeat(1, 1, 6)

        # NOTE(review): element indices are stored as float — presumably the
        # layout expected by the fast_anchor kernel; confirm, since float32
        # cannot represent indices beyond 2**24 exactly.
        self.__anchor_indices = torch.as_tensor(anchor_cell, dtype=torch.float, device=device)
        self.__K_mask = torch.as_tensor(K_mask, dtype=torch.float, device=device)
        self.__F_mask = torch.as_tensor(F_mask, dtype=torch.float, device=device)

    @torch.no_grad()
    def indices(self):
        """Build and cache the COO indices of K (24x24 block per element) and
        F (24x6 block per element), plus the sort permutations and CSR
        segment pointers used by ``symcoalesce()`` / ``coalesce()`` to merge
        duplicate entries without re-sorting on every assembly.
        """
        n = self.__nelx * self.__nely * self.__nelz
        # Global DOF number of each of the 24 element DOFs: 3*node + component.
        dof_indices = torch.empty(
            n, 8, 3, dtype=self.__cellseq.dtype, device=self.__cellseq.device)
        for i in range(3):
            dof_indices[:, :, i] = 3*self.__cellseq+i
        # NOTE(review): the indices are expanded via float bmm below; this is
        # exact only while DOF numbers stay below 2**24 (float32 mantissa) —
        # confirm for large grids.
        dof_indices = torch.as_tensor(
            dof_indices, dtype=torch.float, device=device).view(-1, 24).unsqueeze(2)

        # Outer products against a ones vector replicate the 24 DOF numbers
        # into full (row, col) index pairs for the 24x24 element blocks.
        Kij = torch.zeros(2, 24 * 24 * n, device=device)
        temp = torch.ones(
            n, 24, 1, dtype=torch.float, device=device)
        Kij[0, :] = dof_indices.bmm(temp.transpose(1, 2)).contiguous().view(-1)
        Kij[1, :] = temp.bmm(dof_indices.transpose(1, 2)).contiguous().view(-1)

        # F rows are DOF numbers; columns are the 6 macro-strain load cases.
        Fij = torch.zeros(2, 24 * 6 * n, device=device)
        temp_F = torch.ones(n, 6, 1, dtype=torch.float, device=device)
        Fij[0, :] = dof_indices.bmm(
            temp_F.transpose(1, 2)).contiguous().view(-1)
        Fij[1, :] = torch.arange(0, 6, device=device).unsqueeze(0).unsqueeze(
            0).repeat(n, 24, 1).contiguous().view(-1)

        self.__K_indices = torch.as_tensor(Kij, dtype=torch.int64)
        self.__F_indices = torch.as_tensor(Fij, dtype=torch.int64)

        nd = 3*self.__nelx * self.__nely * self.__nelz
        # Coalesce + symmetrize K: encode (row, col) as row*nd + col for both
        # the entries and their transposes, sort once, and keep the first of
        # each run of equal keys. __K_ptr marks the run starts (CSR-style) so
        # segment_sum_csr can sum the duplicated values per unique entry.
        sorted_K, self.__K_sortidx = torch.cat(
            [self.__K_indices[0]*nd+self.__K_indices[1],
             self.__K_indices[1]*nd+self.__K_indices[0]]).sort()
        # Sentinel -1 in front makes the first element always start a run.
        sorted_K = torch.cat(
            [-torch.ones(1, dtype=sorted_K.dtype, device=sorted_K.device), sorted_K])
        mask = sorted_K[1:] > sorted_K[:-1]
        self.__K_indices = self.__K_indices.repeat(1, 2)[:, self.__K_sortidx][:, mask]

        self.__K_ptr = mask.nonzero().flatten()
        self.__K_ptr = torch.cat(
            [self.__K_ptr, self.__K_ptr.new_full((1, ), mask.numel())])

        # Same scheme for F with key row*6 + col (no symmetrization).
        sorted_F, self.__F_sortidx = (self.__F_indices[0]*6+self.__F_indices[1]).sort()
        sorted_F = torch.cat(
            [-torch.ones(1, dtype=sorted_F.dtype, device=sorted_F.device), sorted_F])
        mask = sorted_F[1:] > sorted_F[:-1]
        self.__F_indices = self.__F_indices[:, self.__F_sortidx][:, mask]
        self.__F_ptr = mask.nonzero().flatten()
        self.__F_ptr = torch.cat(
            [self.__F_ptr, self.__F_ptr.new_full((1, ), mask.numel())])

        # BUGFIX: removed a dead block-Jacobi index construction that read
        # self.__jacobi — that attribute is never assigned anywhere in this
        # class (its initialization is commented out), so reaching it raised
        # AttributeError; the tensor it built was also never used.

    @torch.no_grad()
    def get_K_indices(self):
        """Return the coalesced (2, nnz) COO indices of K built by indices()."""
        return self.__K_indices

    @torch.no_grad()
    def symcoalesce(self, value):
        """Coalesce K values while symmetrizing.

        Each raw entry and its transpose contribute 0.5x, then duplicates are
        summed per unique (row, col) via the precomputed sort permutation and
        CSR pointers — equivalent to ``0.5*(K + K^T)`` followed by coalesce.
        """
        return segment_sum_csr(value.repeat(2)[self.__K_sortidx]*0.5, self.__K_ptr)

    @torch.no_grad()
    def coalesce(self, value):
        """Sum duplicate F values per unique (row, col) using the precomputed
        F sort permutation and segment pointers."""
        value_ = segment_sum_csr(value[self.__F_sortidx], self.__F_ptr)
        return value_

    @torch.no_grad()
    def full_assembly(self, voxel, Ke_hard, Fe_hard, Ke_soft, Fe_soft):
        """Assemble the coalesced K values and dense F for a voxel field.

        Requires ``anchor()`` and ``indices()`` to have been called first.

        Args:
            voxel: (nelx, nely, nelz) occupancy field; nonzero -> hard material.
            Ke_hard, Ke_soft: 24x24 element stiffness matrices.
            Fe_hard, Fe_soft: 24x6 element macrostrain-force matrices.

        Returns:
            (vK, F): coalesced, symmetrized K values aligned with
            ``get_K_indices()``, and the dense (3n, 6) load matrix.
        """
        # Per-element values: select the hard or soft matrices per voxel in a
        # single fused kernel (replaces boolean-masked repeat assembly).
        voxel_ = torch.as_tensor(voxel, dtype=torch.float, device=voxel.device).contiguous()
        vK = fast_repeat(voxel_, Ke_hard.contiguous(), Ke_soft.contiguous())
        vF = fast_repeat(voxel_, Fe_hard.contiguous(), Fe_soft.contiguous())

        # Apply the anchor masks in place: zero the anchored rows/cols of the
        # element K blocks and the anchored rows of the element F blocks.
        fast_anchor(vK, self.__anchor_indices, self.__K_mask)
        fast_anchor(vF, self.__anchor_indices, self.__F_mask)

        # Merge duplicate COO entries; vK is also symmetrized (0.5*(K+K^T)),
        # vF collapses to one dense row per DOF.
        vK_ = self.symcoalesce(vK.contiguous().view(-1))
        vF_ = self.coalesce(vF.contiguous().view(-1)).view(-1, 6)

        return vK_, vF_

    @torch.no_grad()
    def homo_new(self, voxel, U, ke_hard, fe_hard):
        """Compute the 6x6 homogenized elasticity tensor CH.

        Sums the element energy contributions ``(X0 - u_e)^T ke (X0 - u_e)``
        over solid voxels only and divides by the cell volume.

        Args:
            voxel: (nelx, nely, nelz) occupancy field (nonzero -> solid).
            U: (18, nelx, nely, nelz) displacements — 6 load cases x 3 comps.
            ke_hard: 24x24 element stiffness matrix.
            fe_hard: 24x6 element macrostrain-force matrix.

        Returns:
            (6, 6) homogenized tensor CH / volume.
        """
        solid_seq = self.__cellseq[voxel.type(
            torch.bool).contiguous().view(-1), :]
        n = solid_seq.shape[0]

        volume = self.__lx * self.__ly * self.__lz

        # reshape U(18,N,N,N)->u(6,3,N^3), then transpose u(6,3,N^3)->u(6,N^3,3)
        u_ = U.contiguous().view(6, 3, -1).transpose(1, 2)
        # reshape u(6,N^3,3)->u(6,N^3*3), transpose -> u(N^3*3,6)
        u_ = u_.contiguous().view(6, -1).t()

        # Gather the 24 DOF displacements of every solid element.
        index_u = torch.empty(n, 8, 3, dtype=torch.int64, device=U.device)
        for i in range(3):
            index_u[:, :, i] = 3 * solid_seq + i
        index_u = index_u.contiguous().view(-1)

        u = u_[index_u, :].contiguous().view(n, 24, 6)
        del index_u  # free the large gather index before the bmm below

        CH = torch.zeros(6, 6, dtype=ke_hard.dtype, device=ke_hard.device)

        # X0: element response to the unit macro strains, computed on the
        # reduced system with 6 DOFs pinned so ke is invertible (removes the
        # rigid-body modes — presumably; confirm the pinned-DOF choice).
        idx = torch.ones(24, dtype=torch.bool, device=ke_hard.device)
        idx[[0, 1, 2, 4, 5, 11]] = False

        X0 = torch.zeros_like(fe_hard)
        X0[idx, :] = torch.inverse(ke_hard[idx, :][:, idx])@fe_hard[idx, :]

        # CH += sum_e L_e^T ke L_e  with  L_e = X0 - u_e
        L = X0-u
        CH = torch.addbmm(CH, L.transpose(1, 2), ke_hard@L)

        return 1 / volume * CH

    @torch.no_grad()
    def solve_by_torch(self, voxel, Ke_hard, Fe_hard, Ke_soft, Fe_soft, tol=1e-3, maxit=5000):
        """Solve K u = F with matrix-free linear conjugate gradients.

        Args:
            voxel (torch.cuda.FloatTensor): Voxel field, (nelx, nely, nelz).
            Ke_hard/Ke_soft (torch.cuda.FloatTensor): 24x24 element stiffness.
            Fe_hard/Fe_soft (torch.cuda.FloatTensor): 24x6 macrostrain-force.
            tol (float, optional): Linear CG tolerance. Defaults to 1e-3.
            maxit (int, optional): Maximum iterations. Defaults to 5000.

        Returns:
            U (torch.cuda.FloatTensor): displacements, (18, nelx, nely, nelz).
        """
        nd = 3*self.__nelx * self.__nely * self.__nelz
        vK, F = self.full_assembly(voxel, Ke_hard, Fe_hard, Ke_soft, Fe_soft)

        # Matrix-free operator: K is applied via sparse mat-vec on the cached
        # COO indices, never materialized as a torch sparse tensor.
        def Kmm(rhs): return spmm(self.__K_indices, vK, nd, nd, rhs)

        X = linear_cg(Kmm, F, tolerance=tol, max_iter=maxit)

        # transpose u(6,N^3*3)<-u(N^3*3,6), reshape u(6,N^3,3)<-u(6,N^3*3)
        u = X.t().contiguous().view(6, -1, 3)
        # transpose u(6,3,N^3)<-u(6,N^3,3), reshape U(18,N,N,N)<-u(6,3,N^3)
        u = u.transpose(1, 2).contiguous().view(
            18, self.__nelx, self.__nely, self.__nelz)
        return u

    @torch.no_grad()
    def MPE_full(self, voxel, Ke_hard, Fe_hard, Ke_soft, Fe_soft, U):
        """Compute the minimal potential energy 0.5*U^T K U - U^T F and its
        gradient K U - F.

        Args:
            voxel (torch.cuda.FloatTensor): Voxel field, (nelx, nely, nelz).
            Ke_hard/Ke_soft (torch.cuda.FloatTensor): 24x24 element stiffness.
            Fe_hard/Fe_soft (torch.cuda.FloatTensor): 24x6 macrostrain-force.
            U (torch.cuda.FloatTensor): displacements, (18, nelx, nely, nelz).

        Returns:
            (energy, grad): scalar energy summed over the 6 load cases, and
            the gradient reshaped to (18, nelx, nely, nelz).
        """
        nd = 3*self.__nelx * self.__nely * self.__nelz
        vK, F = self.full_assembly(voxel, Ke_hard, Fe_hard, Ke_soft, Fe_soft)

        u = U.contiguous().view(6, 3, -1).transpose(1, 2)
        u = u.contiguous().view(6, -1).t()

        Ku = spmm(self.__K_indices, vK, nd, nd, u)

        grad = (Ku - F).t().contiguous().view(6, -1, 3).transpose(1,
                                                                  2).contiguous().view(18, self.__nelx, self.__nely, self.__nelz)

        # Only the diagonal of u^T(0.5*Ku - F) is wanted: each load case's
        # displacement paired with its own load; masking with eye keeps it.
        energy = ((u.t() @ (0.5*Ku - F)) *
                  torch.eye(6, 6, dtype=u.dtype, device=u.device)).sum()
        return energy, grad

    
