import numpy as np
from numpy import linalg as LA
from numpy.linalg import inv
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from loss import loss_be
import torch
import time
torch.set_default_tensor_type(torch.DoubleTensor)
dev = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
def FF(U):
    """Cubic Allen-Cahn nonlinearity f(u) = u * (1 - u^2), applied elementwise."""
    squared = U * U
    return U * (1 - squared)


def circle_x(P, U):
    """Left-multiply U by P (acts along the second-to-last dimension of U)."""
    return P @ U


def circle_y(P, U):
    """Right-multiply U by P (acts along the last dimension of U)."""
    return U @ P

def mass_engy(U, hx, hy, epsilon):
    """Compute total mass and Ginzburg-Landau energy of a cell-averaged field.

    For each grid cell the value is the average of its four corners; the mass
    is the sum of cell averages times the cell area, and the energy combines
    the double-well term 0.25*(u^2 - 1)^2 with the gradient term
    0.5*eps^2*|grad u|^2 (centered differences across each cell).

    Parameters
    ----------
    U       : torch.Tensor of shape (batch, channel, N, M) — nodal values.
    hx, hy  : grid spacings in x and y.
    epsilon : interface-width parameter weighting the gradient energy.

    Returns
    -------
    (mass, egy) : tensors of shape (batch, channel).
    """
    # Four corners of every cell at once — replaces the O(N*M) Python loop
    # with a single vectorized pass; the per-cell arithmetic is unchanged.
    a = U[:, :, :-1, :-1]   # (i,     j)
    b = U[:, :, 1:, :-1]    # (i + 1, j)
    c = U[:, :, :-1, 1:]    # (i,     j + 1)
    d = U[:, :, 1:, 1:]     # (i + 1, j + 1)

    uu = 0.25 * (a + b + c + d)
    mass = torch.sum(uu, dim=(2, 3)) * hx * hy

    egy1 = torch.sum((uu * uu - 1) ** 2, dim=(2, 3))
    udx = 0.5 * (b - a + d - c) / hx
    udy = 0.5 * (c - a + d - b) / hy
    egy2 = torch.sum(udx * udx + udy * udy, dim=(2, 3))

    egy = (0.25 * egy1 + 0.5 * egy2 * epsilon ** 2) * hx * hy
    return mass, egy

# def U_full(U):
#     N = np.shape(U)[2]
#     M = np.shape(U)[3]
#     U_full =torch.zeros((N + 1, M + 1))
#     U_full[1:N + 1, :M] = U[:N, :M]
#     U_full[0, :M] = U[N-1, :M]
#     U_full[:, M] = U_full[:, 0]
#     return U_full
def U_full(U):
    """Add the periodic wrap-around border to a (..., N, M) grid.

    Copies column 0 onto the right edge and the last row onto the top,
    producing a (..., N + 1, M + 1) tensor.
    """
    wrapped = torch.cat((U, U[..., :, :1].clone()), dim=-1)
    wrapped = torch.cat((wrapped[..., -1:, :].clone(), wrapped), dim=-2)
    return wrapped

def numerical_all(U, domain, n, dt, epsilon, type='BE'):
    """Time-step the Allen-Cahn equation u_t = eps^2 * Lap(u) + f(u) on a
    periodic rectangle.

    The discrete periodic Laplacian separates into 1-D second-difference
    matrices A (x-direction) and B (y-direction).  Each is diagonalised once,
    so every implicit solve reduces to two changes of basis and a pointwise
    division by the eigenvalue grid DD.  The nonlinear term FF is handled by
    fixed-point iteration (at most ``iter_max`` sweeps, max-norm tol 1e-6).

    Parameters
    ----------
    U       : torch.Tensor of shape (batch, channel, N, M) — initial state.
    domain  : [x_lo, x_hi, y_lo, y_hi] bounds of the rectangle.
    n       : number of time steps.
    dt      : time-step size.
    epsilon : interface-width parameter multiplying the Laplacian.
    type    : 'BE' (backward Euler) or 'CN' (Crank-Nicolson).

    Returns
    -------
    U_t : numpy array — final state with the periodic wrap row/column added.
    Num : list of numpy arrays, one snapshot per time step.
    """
    N = U.size()[2]
    M = U.size()[3]

    lx = domain[1] - domain[0]
    ly = domain[3] - domain[2]
    hx = lx / N
    hy = ly / M

    # 1-D periodic second-difference stencils.  Built explicitly as N x N and
    # M x M matrices on U's dtype/device (the previous zeros_like(U)[0][0]
    # trick produced an (N, M) matrix and silently required N == M).
    A = U.new_zeros((N, N))
    for i in range(N - 1):
        A[i, i] = -2
        A[i, i + 1] = 1
        A[i + 1, i] = 1
    A[N - 1, N - 1] = -2
    A[0, N - 1] = 1
    A[N - 1, 0] = 1

    B = U.new_zeros((M, M))
    for j in range(M - 1):
        B[j, j] = -2
        B[j, j + 1] = 1
        B[j + 1, j] = 1
    B[M - 1, M - 1] = -2
    B[0, M - 1] = 1
    B[M - 1, 0] = 1

    A = epsilon ** 2 * A / (hx * hx)
    B = epsilon ** 2 * B / (hy * hy)

    # Both stencils are symmetric, so use the symmetric eigensolver (real
    # spectrum, orthogonal eigenvectors).  torch.eig(..., eigenvectors=True)
    # was removed from PyTorch; eigh's real eigenvalues replace Dx[i][0].
    Dx, Px = torch.linalg.eigh(A)
    Pxi = torch.linalg.inv(Px)
    Dy, Py = torch.linalg.eigh(B)
    Pyi = torch.linalg.inv(Py)

    Num = []
    iter_max = 50

    # Computed up-front so the function returns the initial state (instead of
    # raising NameError) when n == 0 or an unknown `type` is passed.
    U_full1 = U_full(U)

    if type == 'BE':
        # (I - dt * L) is diagonal in eigenspace.  DD is a 2-D (N, M) grid
        # that broadcasts over batch/channel; the previous 4-D
        # torch.zeros_like(U) version indexed DD[i, j] out of bounds.
        DD = 1 - dt * (Dx[:, None] + Dy[None, :])

        for k in range(n):
            RH_curr = U
            # Fixed-point iteration on the implicit nonlinear term.
            for iter in range(iter_max):
                FU = FF(U)
                RH = RH_curr + dt * FU
                # Transform to eigenbasis, divide by DD, transform back.
                U_new = circle_y(Pyi, circle_x(Px, torch.div(circle_y(Py, circle_x(Pxi, RH)), DD)))
                if (torch.max(torch.abs(U - U_new))) < 1e-6:
                    U = U_new
                    break
                U = U_new

            U_full1 = U_full(U)
            Num.append(U_full1.cpu().detach().numpy())

    if type == 'CN':
        # Crank-Nicolson: half the diffusion goes explicitly into the
        # right-hand side, half implicitly through DD.
        DD = 1 - 0.5 * dt * (Dx[:, None] + Dy[None, :])

        for k in range(int(n)):
            FU = FF(U)
            RH_curr = U + 0.5 * dt * (circle_x(A, U) + circle_y(B, U)) + 0.5 * dt * FU

            for iter in range(iter_max):
                FU = FF(U)
                RH = RH_curr + 0.5 * dt * FU
                U_new = circle_y(Pyi, circle_x(Px, torch.div(circle_y(Py, circle_x(Pxi, RH)), DD)))
                if (torch.max(torch.abs(U - U_new))) < 1e-6:
                    U = U_new
                    break
                U = U_new

            U_full1 = U_full(U)
            Num.append(U_full1.cpu().detach().numpy())

    U_t = U_full1.cpu().detach().numpy()
    return U_t, Num



# torch.manual_seed(5)
# x=torch.rand(1, 1, 256, 256,dtype=torch.float64)
# # if torch.cuda.is_available():
# #    x = x.to("cuda:0")
# U= ((-1-1) * x+1)*0.9
# domain = [-0.5,0.5,-0.5,0.5]
# deltaT=0.1
# epsilon =0.01
# n=100
# start=time.time()
# a=numerical_all(U,domain,n,deltaT,epsilon,type='BE')
# stop =time.time()
# print(f"testing time: {stop - start}s")
# a=a[0][0].cpu().detach().numpy()
# fig, ax = plt.subplots()
#
# im = ax.imshow(a, interpolation='bilinear', cmap=cm.RdYlGn)
# plt.title('Numerical Solution')
# plt.colorbar(im)
# plt.show()
# np.random.seed(seed=10)
# U=(-1+np.random.rand(128,128)*2)*0.8
# U= np.zeros((32,32))
# U[5:9,5:9] = 1
# domain = [-0.5,0.5,-0.5,0.5]
# deltaT=0.1
# epsilon =0.01
# a=numerical(U, domain, 10, deltaT, epsilon, type='CN')
# u_next = torch.unsqueeze(torch.unsqueeze(torch.from_numpy(a), 0), 0)
# u_n = torch.unsqueeze(torch.unsqueeze(torch.from_numpy(U), 0), 0)
# L=loss_be(u_n, u_next, deltaT, domain,epsilon,type='BE')[0][0].cpu().detach().numpy()

# fig, ax = plt.subplots()
# im = ax.imshow(L, interpolation='bilinear', cmap=cm.RdYlGn)
# plt.title('Loss')
# plt.colorbar(im)
# plt.show()

# b=numerical(U, [-0.5,0.5,-0.5,0.5], 100,0.1,0.01, type='BE')
# x=[1,2,4,8]

# for i in range(4):
#     ai = numerical(U, [-0.5,0.5,-0.5,0.5], 1*x[i], 0.1/x[i], 0.01, type='BE')
#     ci = np.linalg.norm(ai-b,'fro')
#     print(ci)

# c=a-b
# print(a-b)
# error=np.linalg.norm(c,'fro')
# print(error)
# fig, ax = plt.subplots()
# im = ax.imshow(a, interpolation='bilinear', cmap=cm.RdYlGn)
# plt.title('Numerical Solution')
# plt.colorbar(im)
# fig, ax2 = plt.subplots()
# im2 = ax2.imshow(b, interpolation='bilinear', cmap=cm.RdYlGn)
# plt.title('Numerical Solution')
# plt.colorbar(im2)
#
# plt.show()




# N=M=256
# dt=0.00001
# domain=[-1,1,-1,1]
# epsilon = 1
# n=20
# xx = np.linspace(-1, 1, N)
# yy = np.linspace(-1, 1, M)
#
# X, Y = np.meshgrid(xx, yy)
# Z = np.sin(np.pi*(X-1/4))*np.sin(2*np.pi*(Y-1/8))
# Z_Num=numerical(Z,domain,n,dt,epsilon,type='BE')
# Zexact = np.exp(-5*np.pi**2*dt*n)*np.sin(np.pi*(X-1/4))*np.sin(2*np.pi*(Y-1/8))
# Z_exact = U_full(Zexact)
# error = Z_exact - Z_Num
# Norm_error = np.linalg.norm(error) / np.linalg.norm(Z_exact)
#
# fig, ax = plt.subplots()
# im = ax.imshow(Z_exact, interpolation='bilinear', cmap=cm.RdYlGn)
# plt.title('Z_exact')
# plt.colorbar(im)
# plt.show()
