
import torch

import numpy as np
import scipy

from scipy import sparse
from scipy import optimize

import os
# Use float64 tensors everywhere so torch values round-trip losslessly
# through the double-precision NumPy/SciPy solvers used below.
torch.set_default_tensor_type(torch.DoubleTensor)
# Tolerate duplicate OpenMP runtimes (torch and MKL/NumPy may each bundle
# one); without this the process can abort at import time on some platforms.
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"



def Numerical_solution(x0,deltaT,domain,Hx,Hy,Hxx,Hyy,Hxy,epsilon,type='BE'):
    """One implicit time step of a periodic reaction-diffusion solve via Newton.

    For the flattened field u on an h x w periodic grid, finds the root of
        (u - u0)/deltaT - (eps^2 * Lap(u) + (u - u^3) - mean(u - u^3)) = 0
    (backward Euler, type='BE') or its Crank-Nicolson average (type='CN'),
    using scipy.optimize.newton started from the current state.

    Parameters (shapes assumed from usage, not checked):
      x0      -- torch tensor of shape (1, 1, h, w): current field.
      deltaT  -- time-step size.
      domain  -- (x_min, x_max, y_min, y_max) physical extents of the grid.
      Hx, Hy  -- (h*w, h*w) sparse second-difference operators (see
                 operatorx / operatory below).
      Hxx, Hyy, Hxy -- fourth/mixed-difference operators; only referenced by
                 biolaplace, which is defined but never called here.
      epsilon -- interface-width parameter.
      type    -- 'BE' or 'CN'.  NOTE(review): shadows the builtin `type`,
                 and any other value leaves `f` unbound, causing a NameError
                 at the newton call below; consider raising ValueError.

    Returns a torch tensor of shape (1, 1, h, w) holding the updated field.
    """
    A=x0.shape
    h_x =A[2]  # grid height (number of rows)
    w_x =A[3]  # grid width (number of columns)
    lx = domain[1] - domain[0]  # physical extent in x
    ly = domain[3] - domain[2]  # physical extent in y

    # Grid spacings: the w columns span lx, the h rows span ly.
    deltax = lx / (w_x)
    deltay = ly / (h_x)




    # Flatten to a 1-D float64 NumPy vector for the SciPy solver.
    x1 = torch.reshape(x0, [-1, ])
    x2 = x1.to('cpu').detach().numpy()
    # Scratch buffers captured by the closures below.
    # NOTE(review): `y` is written by diag() but never read afterwards --
    # it looks like leftover scaffolding for a dense Jacobian.
    y=np.zeros((h_x*w_x,h_x*w_x))
    z=np.zeros((1,h_x*w_x))
    def diag(x,a):
        # Fills the diagonal of the closure buffer `y` with `a`, but returns
        # `x` unchanged -- callers receive x itself, not a diagonal matrix.
        for i in range(h_x*w_x):
            y[i,i]=a
        return x

    def repeate(x):
        # (h*w, h*w) matrix whose every row is the element-wise square of x.
        for i in range(h_x * w_x):
            z[:,i] = x[i] ** 2
        a = np.repeat(z, [h_x * w_x], axis=0)
        return a


    def laplace(x):
        # Discrete periodic Laplacian: second differences scaled by spacing.
        return Hx.dot(x)/(deltax*deltax)+Hy.dot(x)/(deltay*deltay)
    def biolaplace(x):
        # Discrete biharmonic operator; currently unused in this function.
        return Hxx.dot(x)/(deltax**4)+Hyy.dot(x)/(deltay**4)+Hxy.dot(x)/(deltax**2*deltay**2)
    def integral (x):
        # Mean over the grid (sum normalised by the number of points).
        return np.sum(x)/h_x/w_x
    def BE_f(x):
        # Backward-Euler residual; its root is the next time level.
        return (x-x2)/deltaT-(epsilon*epsilon*laplace(x)+(x-x**3) -integral(x-x**3))

        # return x - x2 - deltaT * (epsilon * epsilon * laplace(x) + x - x ** 3)
    def CN_f(x):
        # Crank-Nicolson residual: trapezoidal average of old and new states.
        return x-x2-deltaT*(epsilon*epsilon*(laplace(x)+laplace(x2))/2+(x+x2)/2-(x**3+x2**3)/2 - (integral(x-x**3)+integral(x2-x2**3))/2)
    def BE_devirf(x):
        # NOTE(review): intended as the Jacobian of BE_f, but it mixes a
        # scalar (x.dot(x)), sparse matrices (Hx+Hy) and a dense matrix
        # (repeate(x)), and diag() returns x rather than a diagonal matrix.
        # Verify this against the analytic derivative before trusting it.
        return diag(x,1/deltaT-1)-epsilon*epsilon/h_x/w_x*(Hx+Hy)-3*x.dot(x)-h_x*w_x*(1-3*repeate(x))
    if type == 'BE':
        f = BE_f
    elif type == 'CN':
        f = CN_f

    # Newton iteration from the current state; disp=True makes newton raise
    # if it fails to converge.  NOTE(review): BE_devirf is supplied as
    # fprime even when type == 'CN'.
    u_next0=scipy.optimize.newton(f,fprime=BE_devirf,fprime2=None,x0=x2,tol=1e-10,maxiter=1000,disp=True)
    # Diagnostic: residual of the BE equation at the computed root.
    a = (u_next0-x2)/deltaT-(epsilon*epsilon*laplace(u_next0)+(u_next0-u_next0**3) -integral(u_next0-u_next0**3))
    print(a)
    # Back to torch; the triple unsqueeze here is immediately undone by the
    # reshape on the next line.
    u_next0 = torch.unsqueeze(torch.unsqueeze(torch.unsqueeze(torch.from_numpy(u_next0), 0), 0), 0)
    u_next1 = torch.reshape(u_next0, (h_x,w_x))
    u_next = torch.unsqueeze(torch.unsqueeze(u_next1, 0), 0)
    return u_next

def operatorx(x):
    """Periodic second-difference operator along x (the width axis).

    Returns the (h*w, h*w) sparse matrix applying the 1-D stencil [1, -2, 1]
    with wrap-around inside each grid row.  Rows are independent, so the
    matrix is block-diagonal with h identical w x w circulant blocks.

    x : torch tensor of shape (batch, K, h, w) -- only the grid shape is read.
    Returns a scipy.sparse.csr_matrix (float64).
    """
    h_x = x.size()[2]
    w_x = x.size()[3]

    # One w x w circulant block: -2 on the diagonal, +1 on the periodic
    # neighbours.  Assignment order (diagonal first, then neighbours)
    # matches the original loops, so degenerate widths (w = 1, 2) agree.
    idx = np.arange(w_x)
    A = np.zeros((w_x, w_x))
    A[idx, idx] = -2
    A[idx, (idx + 1) % w_x] = 1
    A[idx, (idx - 1) % w_x] = 1

    # Sparse block-diagonal replication over the h grid rows; this avoids
    # materialising the dense (h*w)^2 torch tensor the original built.
    return sparse.kron(sparse.identity(h_x), sparse.csr_matrix(A), format='csr')

def operatory(x):
    """Periodic second-difference operator along y (the height axis).

    Couples each grid row to the rows above and below it (with wrap-around):
    the result is kron(C, I_w) where C is the h x h periodic [1, -2, 1]
    circulant over grid rows.

    x : torch tensor of shape (batch, K, h, w) -- only the grid shape is read.
    Returns a scipy.sparse.csr_matrix (float64).
    """
    h_x = x.size()[2]
    w_x = x.size()[3]

    # h x h circulant over grid rows; assignment order (diagonal first,
    # then periodic neighbours) matches the original loops, so degenerate
    # heights (h = 1, 2) produce identical matrices.
    idx = np.arange(h_x)
    C = np.zeros((h_x, h_x))
    C[idx, idx] = -2
    C[idx, (idx + 1) % h_x] = 1
    C[idx, (idx - 1) % h_x] = 1

    # Each nonzero of C becomes a w x w identity block: sparse kron replaces
    # the original dense block-assembly loops.
    return sparse.kron(sparse.csr_matrix(C), sparse.identity(w_x), format='csr')


def bioperatorx(x):
    """Periodic fourth-difference operator along x (the width axis).

    Applies the 1-D stencil [1, -4, 6, -4, 1] with wrap-around within each
    grid row; the matrix is block-diagonal with h identical w x w blocks.

    x : torch tensor of shape (batch, K, h, w) -- only the grid shape is read.
    Returns a scipy.sparse.csr_matrix (float64).
    """
    h_x = x.size()[2]
    w_x = x.size()[3]

    # One w x w block: 6 on the diagonal, -4 on first neighbours, +1 on
    # second neighbours.
    A = np.zeros((w_x, w_x))
    np.fill_diagonal(A, 6)
    i = np.arange(w_x - 1)
    A[i + 1, i] = -4
    A[i, i + 1] = -4
    k = np.arange(w_x - 2)
    A[k + 2, k] = 1
    A[k, k + 2] = 1
    # Periodic wrap-around entries, applied in the original assignment
    # order so small widths (where offsets coincide) produce identical
    # matrices.
    A[0, w_x - 1] = -4
    A[w_x - 1, 0] = -4
    A[w_x - 1, 1] = 1
    A[0, w_x - 2] = 1
    A[w_x - 2, 0] = 1
    A[1, w_x - 1] = 1

    # Sparse block-diagonal replication over the h grid rows instead of the
    # dense (h*w)^2 torch tensor the original built.
    return sparse.kron(sparse.identity(h_x), sparse.csr_matrix(A), format='csr')

def bioperatory(x):
    """Periodic fourth-difference operator along y (the height axis).

    Block structure kron(C, I_w) where C is the h x h periodic
    [1, -4, 6, -4, 1] circulant over grid rows: each nonzero of C couples a
    pair of grid rows through a scaled w x w identity block.

    x : torch tensor of shape (batch, K, h, w) -- only the grid shape is read.
    Returns a scipy.sparse.csr_matrix (float64).
    """
    h_x = x.size()[2]
    w_x = x.size()[3]

    # h x h row-coupling circulant: 6 on the diagonal, -4 on first
    # neighbours, +1 on second neighbours.
    C = np.zeros((h_x, h_x))
    np.fill_diagonal(C, 6)
    j = np.arange(h_x - 1)
    C[j, j + 1] = -4
    C[j + 1, j] = -4
    k = np.arange(h_x - 2)
    C[k, k + 2] = 1
    C[k + 2, k] = 1
    # Periodic wrap-around entries in the original block-assignment order,
    # so small heights (where offsets coincide) overwrite identically.
    C[0, h_x - 1] = -4
    C[0, h_x - 2] = 1
    C[h_x - 1, 1] = 1
    C[1, h_x - 1] = 1
    C[h_x - 1, 0] = -4
    C[h_x - 2, 0] = 1

    # Sparse kron replaces the original dense block-assembly loops.
    return sparse.kron(sparse.csr_matrix(C), sparse.identity(w_x), format='csr')
def bioperatorxy(x):
    """Mixed-derivative (cross) term of the periodic discrete biharmonic.

    Diagonal blocks hold the in-row circulant A (8 on the diagonal, -4 on
    periodic neighbours); blocks coupling vertically adjacent grid rows
    (with wrap-around) hold B (-4 on the diagonal, +2 on periodic
    neighbours).

    x : torch tensor of shape (batch, K, h, w) -- only the grid shape is read.
    Returns a scipy.sparse.csr_matrix (float64).
    """
    h_x = x.size()[2]
    w_x = x.size()[3]

    # In-row circulant blocks; assignment order matches the original loops
    # so degenerate widths (w = 1, 2) agree.  (The original also allocated
    # an unused buffer C, dropped here.)
    A = np.zeros((w_x, w_x))
    B = np.zeros((w_x, w_x))
    np.fill_diagonal(A, 8)
    np.fill_diagonal(B, -4)
    i = np.arange(w_x - 1)
    A[i + 1, i] = -4
    A[i, i + 1] = -4
    B[i + 1, i] = 2
    B[i, i + 1] = 2
    A[0, w_x - 1] = -4
    A[w_x - 1, 0] = -4
    B[0, w_x - 1] = 2
    B[w_x - 1, 0] = 2

    # Block pattern over the h x h grid of blocks: 1 -> A, 2 -> B.  Later
    # assignments overwrite earlier ones exactly as the original's block
    # writes did (e.g. h == 1 ends up with a single B block).
    pat = np.zeros((h_x, h_x), dtype=np.int64)
    np.fill_diagonal(pat, 1)
    j = np.arange(h_x - 1)
    pat[j, j + 1] = 2
    pat[j + 1, j] = 2
    pat[h_x - 1, 0] = 2
    pat[0, h_x - 1] = 2

    # Assemble sparsely instead of the dense (h*w)^2 torch tensor.
    H = (sparse.kron((pat == 1).astype(float), sparse.csr_matrix(A))
         + sparse.kron((pat == 2).astype(float), sparse.csr_matrix(B)))
    return sparse.csr_matrix(H)



# a = operatorx(x)
# b = operatory(x)

# def Implicit_Euler(x,deltaT,domain,Hx,Hy):
#     h_x = x.size()[2]
#     w_x = x.size()[3]
#     lx = domain[1] - domain[0]
#     ly = domain[3] - domain[2]
#
#     deltax = lx / (w_x)
#     deltay = ly / (h_x)
#
#     x0 = torch.reshape(x, [-1, ])
#     x1 = x0.to('cpu').detach().numpy()
#     P = (torch.eye(h_x * w_x).to('cuda') - deltaT * Hx / (deltax * deltax) - deltaT * Hy / (deltay * deltay))
#     P = (torch.squeeze(torch.squeeze(P, 0), 0)).to('cpu').detach().numpy()
#     LU,P1=lu_factor(P)
#     # x_next0, info = cg(P, x1, tol=1e-09)
#     x_next0 = lu_solve((LU,P1),x1)
#     x_next0 = torch.unsqueeze(torch.unsqueeze(torch.unsqueeze(torch.from_numpy(x_next0), 0), 0),0)
#     x_next = torch.reshape(x_next0, (h_x, w_x))
#     x_next = torch.unsqueeze(torch.unsqueeze(x_next, 0), 0)
#
#     return x_next
#
# def Crank_Nicolson(x,deltaT,domain,Hx,Hy):
#     h_x = x.size()[2]
#     w_x = x.size()[3]
#     lx = domain[1] - domain[0]
#     ly = domain[3] - domain[2]
#
#     deltax = lx / (w_x)
#     deltay = ly / (h_x)
#
#     x0 = torch.reshape(x, [-1, ])
#     x1 = x0.to('cpu').detach().numpy()
#     P = (torch.eye(h_x * w_x).to('cuda') - deltaT * Hx / (2 * deltax * deltax) - deltaT * Hy / (2 * deltay * deltay))
#     P = (torch.squeeze(torch.squeeze(P, 0), 0)).to('cpu').detach().numpy()
#     A = torch.eye(h_x * w_x).to('cuda') + deltaT * Hx / (2 * deltax * deltax) + deltaT * Hy / (2 * deltay * deltay)
#     A = (torch.squeeze(torch.squeeze(A, 0), 0)).to('cpu').detach().numpy()
#     B = np.matmul(A,x1)
#     LU,P1 = lu_factor(P)
#     x_next0 = lu_solve((LU,P1),B)
#     x_next0 = torch.unsqueeze(torch.unsqueeze(torch.unsqueeze(torch.from_numpy(x_next0), 0), 0),0)
#     x_next = torch.reshape(x_next0, (h_x, w_x))
#     x_next = torch.unsqueeze(torch.unsqueeze(x_next, 0), 0)
#
#     return x_next

# a=np.matrix([[1],[2]])
# a=np.array((1,2))
# b=np.array((-4,-2))
# print(a)
# print(np.sum(a))
#
# # def mean(x):
# #     return (x[0]+x[1])/2
# def f(x):
#     return 1/2*x[:]-a-(x[0]+x[1])/2
# b=scipy.optimize.newton(f,fprime=None,fprime2=None,x0=a/2,tol=1e-6,maxiter=100,disp=True)
# print(b)
# print(1/2*b-a-np.mean(b))
# Quick sanity check of element-wise matrix division (z / z == all ones).
# np.matrix is deprecated in NumPy; a regular 2-D ndarray behaves the same
# for np.divide and prints identically via str().
z = np.array([[1, 2], [3, 4]])
b = np.divide(z, z)
print(z)
print(b)
