import torch
import torch.sparse as ts
from torch_sparse import coalesce
device = torch.device('cuda')
from linear_operator.utils.linear_cg import linear_cg
import scipy.io as sio 
from time import time  
from src.preconditioner import *


class Homo3D:

    # rewritten by penghao

    def __init__(self, nelx, nely, nelz, lx, ly, lz, writer=None) -> None:
        """Set up the periodic node-index tables for an nelx*nely*nelz voxel grid.

        Args:
            nelx, nely, nelz: number of cells along each axis.
            lx, ly, lz: physical side lengths of the domain.
            writer: optional logging/summary writer, stored as-is.
        """
        self.writer = writer
        self.__lx = lx
        self.__ly = ly
        self.__lz = lz
        self.__nelx = nelx
        self.__nely = nely
        self.__nelz = nelz

        nel = nelx * nely * nelz
        # flat node ids laid out row-major on the grid
        nodeidx = torch.arange(0, nel, device=device).view(nelx, nely, nelz)

        # periodic wrap: append the first slab along every axis so boundary
        # cells can look up their "next" node
        for dim in range(3):
            first_slab = torch.index_select(
                nodeidx, dim, torch.as_tensor([0], device=device))
            nodeidx = torch.cat((nodeidx, first_slab), dim)

        # the eight corner nodes of every cell, in element-local corner order
        corner_offsets = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0),
                          (0, 0, 1), (1, 0, 1), (1, 1, 1), (0, 1, 1)]
        node_list = [nodeidx[ox:ox + nelx, oy:oy + nely, oz:oz + nelz].reshape((nel, 1))
                     for ox, oy, oz in corner_offsets]

        # per-corner (x, y, z) coordinates and per-cell corner-node sequence
        self.__cellidx = torch.zeros(
            8, 3, nel, device=device, dtype=torch.int64)
        self.__cellseq = torch.zeros(nel, 8, device=device, dtype=torch.int64)
        for corner, nodes in enumerate(node_list):
            self.__cellidx[corner] = self.index2xyz(nodes)
            self.__cellseq[:, corner] = nodes.view(-1)

        self.__nodeidx = nodeidx

    def index2xyz(self, index):
        x = index // (self.__nely * self.__nelz)
        temp = index % (self.__nely * self.__nelz)
        y = temp // self.__nely
        z = temp % self.__nely
        xyz = torch.cat((x, y, z), 1)
        return xyz.t()


    def anchor(self, index=0):
        """Build masks that fix (anchor) the DOFs of one node of the grid.

        The anchor is the node stored in corner slot ``index`` of the first
        cell.  Every cell that touches this node gets masks that zero the
        corresponding rows/columns of its element matrices, effectively
        pinning the node's three DOFs.

        Args:
            index (int, optional): corner slot (0..7) of the first cell used
                to pick the anchor node. Defaults to 0.

        Returns:
            tuple: (anchor_cell, K_mask, F_mask) where
                anchor_cell: 1-D tensor of ids of cells containing the anchor,
                K_mask: (m, 24, 24) bool mask for element stiffness matrices,
                F_mask: (m, 24, 6) float mask for element force matrices.
        """
        # node id of the anchor, looked up via its (x, y, z) coordinates
        anchor = self.__nodeidx[self.__cellidx[index, 0, 0],
                                self.__cellidx[index, 1, 0], self.__cellidx[index, 2, 0]]
    
        # per-cell, per-corner flag: does this corner hold the anchor node?
        mask = torch.eq(self.__cellseq, anchor)

        # ids of the cells that contain the anchor node at least once
        anchor_index = torch.arange(
            0, self.__nelx*self.__nely*self.__nelz, device=anchor.device)
        anchor_cell = anchor_index.masked_select(mask.sum(1).type(torch.bool))
        anchor_mask = mask[anchor_cell, :]

        # keep a batch dimension even when only one cell matches
        if anchor_mask.dim() == 1:
            anchor_mask.unsqueeze_(0)

        # expand node flags to 24 DOFs (3 per node) and invert: anchored
        # DOFs become 0, free DOFs become 1 -> shape (m, 24, 1)
        anchor_mask = ~anchor_mask.unsqueeze(2).repeat(
            1, 1, 3).reshape(-1, 24).unsqueeze(2)
        anchor_mask = torch.as_tensor(
            anchor_mask, dtype=torch.float64, device=anchor_cell.device)
        # outer product zeroes both the rows and columns of anchored DOFs
        K_mask = anchor_mask.bmm(anchor_mask.transpose(1, 2))
        # keep the diagonal so the masked stiffness matrix stays nonsingular
        K_diag = torch.eye(24, 24, dtype=K_mask.dtype, device=K_mask.device).unsqueeze(
            0).repeat(K_mask.shape[0], 1, 1)
        K_mask = torch.logical_or(K_mask, K_diag)
        # same row mask replicated for the 6 load-case force columns
        F_mask = anchor_mask.repeat(1, 1, 6)
        return anchor_cell, K_mask, F_mask

    def homo(self, voxel, U, C_hard,C_soft):

        solid_seq = self.__cellseq[voxel.type(torch.bool).contiguous().view(-1),:]
        n=solid_seq.shape[0]

        volume = self.__lx * self.__ly * self.__lz

        # reshape U(18,N,N,N)->u(6,3,N^3),then transpose u(6,3,N^3)->u(6,N^3,3)
        u_ = U.contiguous().view(6, 3, -1).transpose(1, 2)
        # reshape u(6,N^3,3)->u(6,N^3*3),transpose u(6,N^3*3)->u(N^3*3,6)
        u_ = u_.contiguous().view(6, -1).t()

        # index_u=self.__cellidx.movedim(2,0).contiguous().view(-1).to(U.device)

        index_u = torch.empty(n,8,3,dtype=torch.int64,device=U.device)
        for i in range(3):
            index_u[:,:,i] = 3 * solid_seq + i 
        index_u = index_u.contiguous().view(-1)

        u=u_[index_u,:].contiguous().view(n,24,6)
        print(u.shape)
        del index_u

        I = torch.eye(6, 6,dtype=U.dtype,device=U.device)

        CH = torch.zeros(6, 6,dtype=C_hard.dtype,device=C_hard.device)

        dx = self.__lx / self.__nelx / 2
        dy = self.__ly / self.__nely / 2
        dz = self.__lz / self.__nelz / 2

        pp = torch.as_tensor([-pow(3 / 5, 0.5), 0, pow(3 / 5, 0.5)],dtype=C_hard.dtype,device=C_hard.device)
        ww = torch.as_tensor([5 / 9, 8 / 9, 5 / 9],dtype=C_hard.dtype,device=C_hard.device)

        dxdydz = torch.as_tensor(
            [[-dx, dx, dx, -dx, -dx, dx, dx, -dx], [-dy, -dy, dy, dy, -dy, -dy, dy, dy],
             [-dz, -dz, -dz, -dz, dz, dz, dz, dz]], dtype=C_hard.dtype, device=C_hard.device).t()

        for i in range(3):
            for j in range(3):
                for k in range(3):
                    x = pp[i]
                    y = pp[j]
                    z = pp[k]
                    qxqyqz = torch.as_tensor(
                        [[-((y - 1) * (z - 1)) / 8, ((y - 1) * (z - 1)) / 8, -((y + 1) * (z - 1)) / 8,
                          ((y + 1) * (z - 1)) / 8, ((y - 1) *
                                                    (z + 1)) / 8, -((y - 1) * (z + 1)) / 8,
                          ((y + 1) * (z + 1)) / 8, -((y + 1) * (z + 1)) / 8],
                         [-((x - 1) * (z - 1)) / 8, ((x + 1) * (z - 1)) / 8, -((x + 1) * (z - 1)) / 8,
                          ((x - 1) * (z - 1)) / 8, ((x - 1) * (z + 1)) /
                          8, -((x + 1) * (z + 1)) / 8,
                          ((x + 1) * (z + 1)) / 8, -((x - 1) * (z + 1)) / 8],
                         [-((x - 1) * (y - 1)) / 8, ((x + 1) * (y - 1)) / 8, -((x + 1) * (y + 1)) / 8,
                          ((x - 1) * (y + 1)) / 8, ((x - 1) * (y - 1)) /
                          8, -((x + 1) * (y - 1)) / 8,
                          ((x + 1) * (y + 1)) / 8, -((x - 1) * (y + 1)) / 8]], dtype=C_hard.dtype, device=C_hard.device)

                    J = qxqyqz @ dxdydz
                    invJ = torch.inverse(J)
                    qxyz = invJ @ qxqyqz
                    B = torch.zeros(6, 24, dtype=C_hard.dtype, device=C_hard.device)

                    for i_B in range(8):
                        B[:, i_B * 3:(i_B + 1) * 3] = torch.as_tensor(
                            [[qxyz[0, i_B], 0, 0],
                             [0, qxyz[1, i_B], 0],
                             [0, 0, qxyz[2, i_B]],
                             [qxyz[1, i_B],
                              qxyz[0, i_B], 0],
                             [0, qxyz[2, i_B],
                              qxyz[1, i_B]],
                             [qxyz[2, i_B], 0, qxyz[0, i_B]]], dtype=C_hard.dtype,
                            device=C_hard.device)

                    weight = J.det() * ww[i] * ww[j] * ww[k]

                    # L=I-B@U
                    L = I-B@u
                    # CH=CH+weight*(I-B @ U)^T @ C @ (I-B @ U)
                    CH = torch.addbmm(CH, L.transpose(1, 2), C_hard@L, alpha=weight)
                    del L
        return 1 / volume * CH
    def homo_new(self, voxel, U, ke_hard,fe_hard):
        solid_seq = self.__cellseq[voxel.type(torch.bool).contiguous().view(-1),:]
        n=solid_seq.shape[0]

        volume = self.__lx * self.__ly * self.__lz

        # reshape U(18,N,N,N)->u(6,3,N^3),then transpose u(6,3,N^3)->u(6,N^3,3)
        u_ = U.contiguous().view(6, 3, -1).transpose(1, 2)
        # reshape u(6,N^3,3)->u(6,N^3*3),transpose u(6,N^3*3)->u(N^3*3,6)
        u_ = u_.contiguous().view(6, -1).t()

        # index_u=self.__cellidx.movedim(2,0).contiguous().view(-1).to(U.device)

        index_u = torch.empty(n,8,3,dtype=torch.int64,device=U.device)
        for i in range(3):
            index_u[:,:,i] = 3 * solid_seq + i 
        index_u = index_u.contiguous().view(-1)

        u=u_[index_u,:].contiguous().view(n,24,6)
        del index_u

        CH = torch.zeros(6, 6,dtype=ke_hard.dtype,device=ke_hard.device)

        idx=torch.ones(24,dtype=torch.bool,device=ke_hard.device)
        idx[[0,1,2,4,5,11]]=False
        
        X0=torch.zeros_like(fe_hard)

        X0[idx,:]=torch.inverse(ke_hard[idx,:][:,idx])@fe_hard[idx,:]

        L=X0-u
        CH = torch.addbmm(CH, L.transpose(1, 2), ke_hard@L)

        return 1 / volume * CH
        


    def solve_by_torch(self, voxel, Ke_hard, Fe_hard, Ke_soft, Fe_soft, tol=1e-3, maxit=5000):
        """Solve K u = f with the linear conjugate-gradient method.

        Args:
            voxel (torch.cuda.FloatTensor): Voxel, it should be N*N*N
            Ke_hard/Ke_soft (torch.cuda.FloatTensor): Element stiffness matrices, 24*24
            Fe_hard/Fe_soft (torch.cuda.FloatTensor): Element macrostrain-force matrices, 24*6
            tol (float, optional): Linear cg tolerance. Defaults to 1e-3.
            maxit (int, optional): Maximum iteration number. Defaults to 5000.

        Returns:
            U (torch.cuda.FloatTensor): it should be 18*N*N*N
        """
        start = time()
        K, F = self.full_assembly(voxel, Ke_hard, Fe_hard, Ke_soft, Fe_soft)
        # label the timing output instead of printing a bare number
        print(f"assembly time: {time() - start:.4f}s")

        def Kmm(rhs):
            # matrix-vector product with the assembled sparse stiffness matrix
            return ts.mm(K, rhs)

        start = time()
        X = linear_cg(Kmm, F, tolerance=tol, max_iter=maxit)
        print(f"cg solve time: {time() - start:.4f}s")

        # transpose u(N^3*3,6)->(6,N^3*3), reshape ->(6,N^3,3)
        u = X.t().contiguous().view(6, -1, 3)
        # transpose ->(6,3,N^3), reshape -> U(18,N,N,N)
        u = u.transpose(1, 2).contiguous().view(
            18, self.__nelx, self.__nely, self.__nelz)
        return u

    def MPE_full(self, voxel, Ke_hard, Fe_hard, Ke_soft, Fe_soft, U, nomralized_force=False):
        """Compute the minimal potential energy 0.5*U^T@K@U - U^T@F.

        Sums the per-load-case energies, i.e. the diagonal of the 6x6
        case-interaction matrix.

        Args:
            voxel (torch.cuda.FloatTensor): Voxel, it should be N*N*N
            Ke_hard/Ke_soft (torch.cuda.FloatTensor): Element stiffness matrices, 24*24
            Fe_hard/Fe_soft (torch.cuda.FloatTensor): Element macrostrain-force matrices, 24*6
            U (torch.cuda.FloatTensor): it should be 18*N*N*N
            nomralized_force (bool, optional): normalize each force column to
                unit 2-norm first. (Parameter name kept as-is for callers.)

        Returns:
            energy (torch.Tensor): scalar minimal potential energy
        """
        K, F = self.full_assembly(voxel, Ke_hard, Fe_hard, Ke_soft, Fe_soft)

        if nomralized_force:
            F = F.div(F.norm(2, dim=-2, keepdim=True))

        # U(18,N,N,N) -> (N^3*3, 6): one displacement column per load case
        disp = U.contiguous().view(6, 3, -1).transpose(1, 2)
        disp = disp.contiguous().view(6, -1).t()

        # keep only the diagonal entries of the 6x6 case-interaction matrix
        work = 0.5 * disp.t() @ ts.mm(K, disp) - disp.t() @ F
        diag_selector = torch.eye(6, 6, dtype=disp.dtype, device=disp.device)
        energy = (work * diag_selector).sum()

        return energy

    def MPEGrad_full(self, voxel, Ke_hard, Fe_hard, Ke_soft, Fe_soft, U, nomralized_force=False):
        """Compute the potential-energy gradient K @ U - F per load case.

        Args:
            voxel (torch.cuda.FloatTensor): Voxel, it should be N*N*N
            Ke_hard/Ke_soft (torch.cuda.FloatTensor): Element stiffness matrices, 24*24
            Fe_hard/Fe_soft (torch.cuda.FloatTensor): Element macrostrain-force matrices, 24*6
            U (torch.cuda.FloatTensor): it should be 18*N*N*N
            nomralized_force (bool, optional): normalize each force column to
                unit 2-norm first. (Parameter name kept as-is for callers.)

        Returns:
            grad (torch.cuda.FloatTensor): 18*N*N*N gradient field
            grad_norm (torch.Tensor): per-case residual norm scaled by the
                force norm, shape (1, 6)
        """
        K, F = self.full_assembly(voxel, Ke_hard, Fe_hard, Ke_soft, Fe_soft)

        if nomralized_force:
            F = F.div(F.norm(2, dim=-2, keepdim=True))

        # U(18,N,N,N) -> (N^3*3, 6)
        disp = U.contiguous().view(6, 3, -1).transpose(1, 2)
        disp = disp.contiguous().view(6, -1).t()

        residual = ts.mm(K, disp) - F

        # residual norm relative to the force magnitude, per load case
        force_norm = F.norm(2, dim=-2, keepdim=True)
        grad_norm = residual.div(force_norm).norm(2, dim=-2, keepdim=True)

        # (N^3*3, 6) -> grad(18,N,N,N)
        grad = residual.t().contiguous().view(6, -1, 3)
        grad = grad.transpose(1, 2).contiguous().view(
            18, self.__nelx, self.__nely, self.__nelz)

        return grad, grad_norm


    def full_assembly(self, voxel, Ke_hard, Fe_hard, Ke_soft, Fe_soft):
        """Assemble the global sparse stiffness matrix K and dense force matrix F.

        Returns:
            tuple: (K, F) with K a coalesced (3n, 3n) sparse COO tensor and
            F a dense (3n, 6) tensor, n = number of cells.
        """
        nel = self.__nelx * self.__nely * self.__nelz

        k_idx, k_val, f_idx, f_val = self.per_assembly(
            voxel, Ke_hard, Fe_hard, Ke_soft, Fe_soft)

        K = torch.sparse_coo_tensor(
            k_idx, k_val, (3 * nel, 3 * nel), device=Ke_hard.device)
        F = torch.sparse_coo_tensor(
            f_idx, f_val, (3 * nel, 6), device=Fe_hard.device).to_dense()
        # average with the transpose so K is exactly symmetric
        K = 0.5 * (K + K.t()).coalesce()

        return K, F


    def displacement_correction(self, voxel, Ke_hard, Fe_hard, Ke_soft, Fe_soft, U):
        """Scale each load case of U by the 2-norm of its assembled force column.

        Returns:
            torch.Tensor: corrected displacement field, 18*N*N*N.
        """
        _, F = self.full_assembly(voxel, Ke_hard, Fe_hard, Ke_soft, Fe_soft)
        col_norm = F.norm(2, dim=-2, keepdim=True)

        # U(18,N,N,N) -> (N^3*3, 6): one displacement column per load case
        flat = U.contiguous().view(6, 3, -1).transpose(1, 2)
        flat = flat.contiguous().view(6, -1).t()

        scaled = flat.mul(col_norm)

        # (N^3*3, 6) -> U(18,N,N,N)
        out = scaled.t().contiguous().view(6, -1, 3)
        out = out.transpose(1, 2).contiguous().view(
            18, self.__nelx, self.__nely, self.__nelz)
        return out

    def per_assembly(self, voxel, Ke_hard, Fe_hard, Ke_soft, Fe_soft):
        """Build per-element COO triplets for the global stiffness K and force F.

        Each cell gets the hard or soft element matrices depending on its
        voxel value, the anchor masks are applied, duplicate indices are
        summed (coalesced), and a preconditioner scales both systems.

        Args:
            voxel (torch.Tensor): N*N*N density field; nonzero = hard phase.
            Ke_hard, Ke_soft (torch.Tensor): 24x24 element stiffness matrices.
            Fe_hard, Fe_soft (torch.Tensor): 24x6 element macrostrain-force matrices.

        Returns:
            tuple: (Kij, vK, Fij, vF) -- int64 COO indices (2, nnz) and value
            vectors for the (3n, 3n) stiffness system and the (3n, 6) force
            system, n = number of cells.
        """
        n=voxel.numel()
        # per-cell element matrices, filled per phase below
        vK=torch.empty(n,24,24,dtype=Ke_hard.dtype,device=Ke_hard.device)
        vF=torch.empty(n,24,6,dtype=Fe_hard.dtype,device=Fe_hard.device)

        n_hard=int(voxel.sum().item())
        n_soft=n-n_hard

        # nonzero voxels are the hard phase
        hard_mask=torch.as_tensor(voxel,dtype=torch.bool,device=voxel.device).contiguous().view(-1)

        vK[hard_mask,:,:]=Ke_hard.repeat(n_hard,1,1)
        vF[hard_mask,:,:]=Fe_hard.repeat(n_hard,1,1)
        vK[~hard_mask,:,:]=Ke_soft.repeat(n_soft,1,1)
        vF[~hard_mask,:,:]=Fe_soft.repeat(n_soft,1,1)

        anchor_cell, K_mask, F_mask=self.anchor()

        # anchor the fixed node by zeroing its rows/columns in the element
        # matrices of the cells that touch it (F(anchor) -> 0)
        for idx in range(K_mask.shape[0]):
            vK[anchor_cell[idx], :, :] = vK[anchor_cell[idx], :, :] * \
                                         K_mask[idx, :, :]
            vF[anchor_cell[idx], :, :] = vF[anchor_cell[idx], :, :] * \
                                         F_mask[idx, :, :]

        vK=vK.contiguous().view(-1)
        vF=vF.contiguous().view(-1)
        # dof_indices=self.__cellidx.transpose(1,2).transpose(0,1).contiguous().view(n,-1)
        # dof_indices=torch.as_tensor(self.__cellidx,dtype=Ke_hard.dtype,device=Ke_hard.device).transpose(1,2).transpose(0,1).contiguous().view(n,-1).unsqueeze(2)

        # global DOF index of each of the 24 element DOFs (3 per corner node)
        dof_indices=torch.empty(n,8,3,dtype=self.__cellseq.dtype,device=self.__cellseq.device)
        for i in range(3):
            dof_indices[:,:,i]= 3*self.__cellseq+i
        dof_indices=torch.as_tensor(dof_indices,dtype=Ke_hard.dtype,device=Ke_hard.device).view(-1,24).unsqueeze(2)


        # torch-sparse_solver version
        # row/col indices via rank-1 outer products with a ones vector.
        # NOTE(review): indices are built in floating point and cast to int64
        # below -- for very large grids this could lose precision; confirm
        # 3*n stays well within the mantissa of Ke_hard.dtype.
        Kij = torch.zeros(2, 24 * 24 * n, device=Ke_hard.device)
        temp = torch.ones(
            n, 24, 1, dtype=Ke_hard.dtype, device=Ke_hard.device)
        Kij[0, :] = dof_indices.bmm(temp.transpose(1, 2)).contiguous().view(-1)
        Kij[1, :] = temp.bmm(dof_indices.transpose(1, 2)).contiguous().view(-1)

       

        # force entries: row = DOF index, col = load case (0..5)
        Fij = torch.zeros(2, 24 * 6 * n, device=Fe_hard.device)
        temp_F = torch.ones(n, 6, 1, dtype=Fe_hard.dtype, device=Fe_hard.device)
        Fij[0, :] = dof_indices.bmm(
            temp_F.transpose(1, 2)).contiguous().view(-1)
        Fij[1, :] = torch.arange(0, 6, device=Fe_hard.device).unsqueeze(0).unsqueeze(
            0).repeat(n, 24, 1).contiguous().view(-1)

        Kij=torch.as_tensor(Kij,dtype=torch.int64)
        Fij=torch.as_tensor(Fij,dtype=torch.int64)

        # sum duplicate (row, col) entries into single COO triplets
        Kij,vK=coalesce(Kij,vK,3*n,3*n)
        Fij,vF=coalesce(Fij,vF,3*n,6)

        # scale both systems with a Jacobi (diagonal) preconditioner;
        # NOTE(review): P.apply mutates/replaces the value vectors in place --
        # confirm against src.preconditioner
        P=JacobPreconditioner(Kij,vK,3*n)
        # P=ColNormPreconditioner(Kij,vK,3*n,0)
        # P=ColNormPreconditioner(Kij,vK,3*n,2)
        # P=ColNormPreconditioner(Kij,vK,3*n,float('inf'))
        P.apply(Kij,vK,3*n,3*n)
        P.apply(Fij,vF,3*n,6)
       

        # print(Kij.shape)
        # print(vK.shape)
        # sio.savemat("matrix.mat",{'Kij':Kij.cpu().numpy(),'vK':vK.cpu().numpy(),'Fij':Fij.cpu().numpy(),'vF':vF.cpu().numpy()})
        return Kij,vK,Fij,vF
    
