import scipy.io
import torch
from matplotlib import pyplot as plt
from torch import nn, Tensor

device = 'cuda'  # training device; switch to 'cpu' when no GPU is available
layer = [2,4,3]  # hidden-layer count for sub-networks 1..3 (passed to neural_net)
W = [20.0,1.0,1.0,20.0]  # loss weights: [boundary data, PDE residual, residual continuity, interface average]
lr = 6e-4  # Adam learning rate shared by all three optimizers
neurons = [30,20,25]  # hidden-layer width for sub-networks 1..3

def neural_net(layer, neurons):
    """Build a fully-connected Tanh network mapping a 2-D input (x, y) to u.

    Args:
        layer: total number of hidden Linear layers (the input Linear counts
            as the first one).
        neurons: width of every hidden layer.

    Returns:
        nn.Sequential with Xavier-normal weights and zero biases.
    """
    modules = [nn.Linear(2, neurons), nn.Tanh()]
    modules += [
        m
        for _ in range(layer - 1)
        for m in (nn.Linear(neurons, neurons), nn.Tanh())
    ]
    modules.append(nn.Linear(neurons, 1))
    model = nn.Sequential(*modules)
    model.apply(init_Xavier)
    return model


def init_Xavier(m):
    """Xavier-normal init for Linear weights; biases start at zero."""
    if isinstance(m, nn.Linear):
        nn.init.xavier_normal_(m.weight)
        nn.init.zeros_(m.bias)


def derivation(u, x, rank):
    """Return d^rank(u)/dx^rank by repeated autograd.

    `create_graph=True` keeps every derivative differentiable, so the result
    can itself appear inside a loss that is backpropagated.
    """
    grad = torch.autograd.grad(
        u,
        x,
        grad_outputs=torch.ones_like(u),
        create_graph=True,
        only_inputs=True,
    )[0]
    if rank == 1:
        return grad
    return derivation(grad, x, rank - 1)

def loss_f(xy_train,u_train,x1,y1,x2,y2,x3,y3,xi1,xi2,yi1,yi2,net1,net2,net3,w):
    """Build the three XPINN sub-network losses for u_xx + u_yy = e^x + e^y.

    Args:
        xy_train: (N,2) boundary points; the boundary-data term is applied to
            net1 only (assumes all boundary points lie in subdomain 1 --
            TODO confirm against the data file).
        u_train: (N,1) boundary values.
        x1,y1 / x2,y2 / x3,y3: (Ni,1) collocation coordinates per subdomain;
            each requires grad so second derivatives can be taken.
        xi1,yi1: (M1,1) interface points shared by net1 and net2.
        xi2,yi2: (M2,1) interface points shared by net1 and net3.
        net1,net2,net3: subdomain networks mapping (x,y) -> u.
        w: four loss weights [boundary data, PDE residual,
            residual continuity, interface average].

    Returns:
        [l1, l2, l3]: scalar loss tensors, one per sub-network, kept separate
        so each network's optimizer can step independently.
    """
    loss = nn.MSELoss()
    # Assemble (N,2) network inputs from the separate coordinate columns.
    xy1 = torch.cat((x1,y1),dim=1)
    xy2 = torch.cat((x2,y2),dim=1)
    xy3 = torch.cat((x3, y3), dim=1)
    xyi1 = torch.cat((xi1, yi1), dim=1)
    xyi2 = torch.cat((xi2, yi2), dim=1)
    u_hat = net1(xy_train)  # boundary prediction (net1 only)
    u1 = net1(xy1)
    u2 = net2(xy2)
    u3 = net3(xy3)
    # Interface predictions: u_iJK = prediction of net K on interface J.
    u_i11 = net1(xyi1)
    u_i12 = net2(xyi1)
    u_i21 = net1(xyi2)
    u_i22 = net3(xyi2)
    # PDE residual per subdomain: u_xx + u_yy - f, with f = e^x + e^y.
    u1_xx = derivation(u1,x1,2)
    u1_yy = derivation(u1,y1,2)
    f1 = (torch.exp(xy1[:, 0]) + torch.exp(xy1[:, 1])).reshape((-1,1))
    u2_xx = derivation(u2, x2, 2)
    u2_yy = derivation(u2, y2, 2)
    f2 = (torch.exp(xy2[:,0])+torch.exp(xy2[:,1])).reshape((-1,1))
    u3_xx = derivation(u3, x3, 2)
    u3_yy = derivation(u3, y3, 2)
    f3 = (torch.exp(xy3[:, 0]) + torch.exp(xy3[:, 1])).reshape((-1,1))
    resloss1 = loss(u1_xx+u1_yy-f1,torch.zeros_like(f1,dtype=torch.float32))
    resloss2 = loss(u2_xx+u2_yy-f2,torch.zeros_like(f2,dtype=torch.float32))
    resloss3 = loss(u3_xx + u3_yy - f3,torch.zeros_like(f3,dtype=torch.float32))
    #resloss = resloss3+resloss2+resloss1
    # Solution continuity across the interfaces: each net is pulled toward the
    # detached average of the two nets' predictions, so no gradient flows into
    # the neighbouring network through this term.
    ui1A = ((u_i11+u_i12)/2.0).detach()
    ui2A = ((u_i21+u_i22)/2.0).detach()
    A1_loss = loss(u_i11,ui1A)+loss(u_i21,ui2A)
    A2_loss = loss(u_i12,ui1A)
    A3_loss = loss(u_i22,ui2A)
    #A_loss = loss(u_i11,ui1A)+loss(u_i12,ui1A)+loss(u_i21,ui2A)+loss(u_i22,ui2A)
    # PDE-residual continuity across the interfaces.
    ui11_xx = derivation(u_i11,xi1,2)
    ui11_yy = derivation(u_i11,yi1,2)
    ui12_xx = derivation(u_i12,xi1,2)
    ui12_yy = derivation(u_i12,yi1,2)
    ui21_xx = derivation(u_i21, xi2, 2)
    ui21_yy = derivation(u_i21, yi2, 2)
    ui22_xx = derivation(u_i22, xi2, 2)
    ui22_yy = derivation(u_i22, yi2, 2)
    fi1 = (torch.exp(xyi1[:, 0]) + torch.exp(xyi1[:, 1])).reshape((-1,1))
    fi2 = (torch.exp(xyi2[:, 0]) + torch.exp(xyi2[:, 1])).reshape((-1,1))
    res_i11 = ui11_xx+ui11_yy-fi1
    res_i12 = ui12_xx+ui12_yy-fi1
    res_i21 = ui21_xx + ui21_yy - fi2
    res_i22 = ui22_xx + ui22_yy - fi2
    # The counterpart's residual is detached, so each continuity term only
    # trains the network that owns the loss it is added to.
    res_continue1 = loss(res_i11,res_i12.detach())+loss(res_i21,res_i22.detach())
    res_continue2 = loss(res_i11.detach(), res_i12)
    res_continue3 = loss(res_i21.detach(), res_i22)

    # Weighted totals; only net1 carries the boundary-data term.
    l1 = w[0]*loss(u_hat,u_train)+w[1]*resloss1+w[2]*res_continue1+w[3]*A1_loss
    l2 = w[1]*resloss2+w[2]*res_continue2+w[3]*A2_loss
    l3 = w[1]*resloss3+w[2]*res_continue3+w[3]*A3_loss
    return [l1,l2,l3]

def train_Xpinn(iterations,xy_train,u_train,x1,y1,x2,y2,x3,y3,xi1,xi2,yi1,yi2,net1,net2,net3,w,device='cpu'):
    """Train the three XPINN sub-networks with one independent Adam step each
    per epoch.

    Args:
        iterations: number of training epochs.
        xy_train, u_train: boundary points/values (fed to net1 in loss_f).
        x1..y3, xi1..yi2: collocation and interface coordinates (see loss_f).
        net1,net2,net3: the subdomain networks; moved to `device` in place of
            their original location.
        w: four loss weights forwarded to loss_f.
        device: torch device string; defaults to 'cpu'.

    Returns:
        Three lists of per-epoch loss values (Python floats) for nets 1..3.

    Uses the module-level `lr` for all three Adam optimizers.
    """
    l1_list, l2_list, l3_list = [], [], []
    net1 = net1.to(device)
    net2 = net2.to(device)
    net3 = net3.to(device)
    tensors = [xy_train, u_train, x1, y1, x2, y2, x3, y3, xi1, xi2, yi1, yi2]
    xy_train, u_train, x1, y1, x2, y2, x3, y3, xi1, xi2, yi1, yi2 = [
        t.to(device) for t in tensors
    ]
    optim1 = torch.optim.Adam(net1.parameters(), lr=lr)
    optim2 = torch.optim.Adam(net2.parameters(), lr=lr)
    optim3 = torch.optim.Adam(net3.parameters(), lr=lr)
    for epoch in range(iterations):
        l1, l2, l3 = loss_f(xy_train, u_train, x1, y1, x2, y2, x3, y3,
                            xi1, xi2, yi1, yi2, net1, net2, net3, w)
        # Each optimizer is zeroed exactly once, right before its backward
        # (the original zeroed optim2/optim3 twice per epoch).  The first two
        # backward calls retain the graph because the three losses were built
        # in the same forward pass; the last backward frees it, instead of
        # keeping the whole graph alive until the next epoch.
        optim1.zero_grad()
        l1.backward(retain_graph=True)
        optim1.step()

        optim2.zero_grad()
        l2.backward(retain_graph=True)
        optim2.step()

        optim3.zero_grad()
        l3.backward()
        optim3.step()

        # .item() detaches and copies to host in one step
        # (replaces Tensor.cpu(l).detach().numpy()).
        l1_list.append(l1.item())
        l2_list.append(l2.item())
        l3_list.append(l3.item())
        if epoch % 500 == 0:
            print(l1, l2, l3)
    return l1_list, l2_list, l3_list

def pre_Xpinn(u1,u2,u3,xy1,xy2,xy3,net1,net2,net3):
    """Predict u on each subdomain's points with its own sub-network.

    Args:
        u1,u2,u3: reference solutions per subdomain (currently unused; kept so
            the existing call signature still works, e.g. for a later error
            computation).
        xy1,xy2,xy3: (Ni,2) evaluation points per subdomain.
        net1,net2,net3: trained sub-networks.

    Returns:
        (u1_hat, u2_hat, u3_hat): predictions per subdomain.  The original
        version computed these and discarded them (implicit None return);
        returning them makes the function usable while remaining
        backward-compatible.
    """
    u1_hat = net1(xy1)
    u2_hat = net2(xy2)
    u3_hat = net3(xy3)
    return u1_hat, u2_hat, u3_hat


if __name__ == '__main__':
    data = scipy.io.loadmat('Xpinn_data.mat')

    def _col(key, grad=False):
        # One (N,1) float32 column tensor from a flattened .mat array.
        return torch.tensor(data[key].flatten()[:, None],
                            dtype=torch.float32, requires_grad=grad)

    # Boundary points / values (fitted by sub-net 1).
    x_train = _col('xb')
    y_train = _col('yb')
    xy_train = torch.cat((x_train, y_train), dim=1)
    u_train = _col('u_train')
    # Collocation (residual) points per subdomain; gradients are required so
    # the PDE derivatives can be taken.
    x1, y1 = _col('sampledX1', True), _col('sampledY1', True)
    x2, y2 = _col('sampledX2', True), _col('sampledY2', True)
    x3, y3 = _col('sampledX3', True), _col('sampledY3', True)
    # Interface points between the subdomain pairs.
    xi1, yi1 = _col('xi1', True), _col('yi1', True)
    xi2, yi2 = _col('xi2', True), _col('yi2', True)

    # Nets are created in the same order as before so RNG draws match.
    net1 = neural_net(layer[0], neurons[0])
    net2 = neural_net(layer[1], neurons[1])
    net3 = neural_net(layer[2], neurons[2])
    num_epoch = 25000
    l1, l2, l3 = train_Xpinn(num_epoch, xy_train, u_train, x1, y1, x2, y2,
                             x3, y3, xi1, xi2, yi1, yi2,
                             net1, net2, net3, W, device)

    # Per-network loss history on a log scale.
    plt.figure()
    plt.semilogy(l1, label='sub-net1')
    plt.semilogy(l2, label='sub-net2')
    plt.semilogy(l3, label='sub-net3')
    plt.legend(loc='best')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.show()