import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
import torch

import matplotlib.pyplot as plt
import copy

# Neural Network
class MLP(torch.nn.Module):
    """Fully connected network mapping a 2-D coordinate to a scalar.

    Architecture: 2 -> 32 -> 32 -> 32 -> 32 -> 1 with Softplus between the
    linear layers.  Softplus is smooth, which matters because the losses
    below take second derivatives of the network output.
    """

    def __init__(self):
        super(MLP, self).__init__()
        # Per the author's experiments: Tanh failed, ReLU/LeakyReLU lacked
        # nonlinear capacity, ELU was mediocre — Softplus worked best.
        activation = torch.nn.Softplus()
        hidden = 32  # default width

        layers = [torch.nn.Linear(2, hidden), activation]
        for _ in range(3):
            layers.append(torch.nn.Linear(hidden, hidden))
            layers.append(activation)
        layers.append(torch.nn.Linear(hidden, 1))
        self.net = torch.nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)

def gradients(u, x, order=1):
    """Return the order-th derivative of u w.r.t. x via autograd.

    create_graph=True keeps the result differentiable so higher orders
    (and loss backprop through the derivative) work.
    """
    grad = torch.autograd.grad(
        u, x,
        grad_outputs=torch.ones_like(u),
        create_graph=True,
        only_inputs=True,
    )[0]
    # Recurse on the first-order gradient for higher orders.
    return grad if order == 1 else gradients(grad, x, order=order - 1)
######################################################################################
def GetXYLoss(u, v, pX1, pY1, pX2, pY2):
    """Landmark loss: MSE between the mapped source points and the targets.

    (pX2, pY2) are pushed through u and v; the results are compared against
    (pX1, pY1).  All arguments are (n, 1) tensors; u and v take an (n, 2)
    coordinate batch.
    """
    mse = torch.nn.MSELoss()
    coords = torch.cat([pX2, pY2], dim=1)
    predX = u(coords)
    predY = v(coords)
    return mse(predX, pX1) + mse(predY, pY1)

def GetABCD(pointList1, pointList2):
    """Precompute the per-point coefficients A..D used by GetOriLoss.

    pointList1/pointList2 are [x, y, orientation_x, orientation_y] for the
    target and source point sets; only the orientation components (indices
    2 and 3) are used.  Returns [pA, pB, pC, pD], each a float (n, 1)
    tensor with
        A = -oy1 * ox2,  B = -oy1 * oy2,  C = ox1 * ox2,  D = ox1 * oy2.

    Improvement over the original: the per-point Python loop plus
    torch.Tensor(list) round-trip is replaced by vectorized elementwise
    products (same values, O(1) tensor ops instead of O(n) list appends).
    """
    with torch.no_grad():
        # Accept plain Python sequences as well as 1-D tensors; .float()
        # matches the float32 dtype torch.Tensor() produced before.
        # NOTE: assumes 1-D orientation inputs, as the unsqueeze(1) in the
        # original implied — TODO confirm against callers.
        pOrtX1 = torch.as_tensor(pointList1[2]).float()
        pOrtY1 = torch.as_tensor(pointList1[3]).float()
        pOrtX2 = torch.as_tensor(pointList2[2]).float()
        pOrtY2 = torch.as_tensor(pointList2[3]).float()

        pA = (-pOrtY1 * pOrtX2).reshape(-1, 1)
        pB = (-pOrtY1 * pOrtY2).reshape(-1, 1)
        pC = (pOrtX1 * pOrtX2).reshape(-1, 1)
        pD = (pOrtX1 * pOrtY2).reshape(-1, 1)
        return [pA, pB, pC, pD]

def GetOriLoss(u, v, pX2, pY2, pABCD):
    """Classic TPS-style orientation loss based on the A..D coefficients.

    Returns (oriLoss1, oriLoss2):
      oriLoss1 — worst-case squared value of A*ux + B*uy + C*vx + D*vy,
      oriLoss2 — sum of A*vx + B*vy - C*ux - D*uy.
    pX2/pY2 must have requires_grad=True (derivatives are taken w.r.t.
    them).  Superseded by GetOriLossNew2 in the training loop.
    """
    xy2 = torch.cat([pX2, pY2], dim=1)
    xOut = u(xy2)
    yOut = v(xy2)
    # Jacobian entries of the mapping at each point.
    udx = gradients(xOut, pX2, 1)
    udy = gradients(xOut, pY2, 1)

    vdx = gradients(yOut, pX2, 1)
    vdy = gradients(yOut, pY2, 1)

    pA = pABCD[0]
    pB = pABCD[1]
    pC = pABCD[2]
    pD = pABCD[3]
    # BUG FIX: the last term was pD*vdx, duplicating vdx; by symmetry with
    # oriLoss2 below (where pD pairs with udy) pD must pair with vdy here.
    # The original author had flagged this very line with a "is this
    # right?" comment.
    tmp = pA * udx + pB * udy + pC * vdx + pD * vdy
    oriLoss1 = tmp ** 2
    # Only the worst point is penalized (sum(...) was tried and abandoned).
    oriLoss1 = max(oriLoss1)
    oriLoss2 = pA * vdx + pB * vdy - pC * udx - pD * udy
    oriLoss2 = sum(oriLoss2)

    return oriLoss1, oriLoss2

def GetOriLossNew(u, v, pX2, pY2, pOrtX2, pOrtY2, pABCD):
    """Orientation loss, superseded by GetOriLossNew2 (kept for reference).

    Normalizes the A..D residual by the length of the transformed source
    orientation vector and sums it over all points.  Returns a zero tensor
    plus the summed loss, matching the (loss1, loss2) interface of the
    sibling loss functions.
    """
    coords = torch.cat([pX2, pY2], dim=1)
    mappedX = u(coords)
    mappedY = v(coords)
    # Jacobian entries of the mapping at each point.
    udx = gradients(mappedX, pX2, 1)
    udy = gradients(mappedX, pY2, 1)
    vdx = gradients(mappedY, pX2, 1)
    vdy = gradients(mappedY, pY2, 1)

    pA, pB, pC, pD = pABCD

    residual = pA * vdx + pB * vdy - pC * udx - pD * udy
    # Transformed source orientation and its Euclidean length.
    tOrtX = udx * pOrtX2 + udy * pOrtY2
    tOrtY = vdx * pOrtX2 + vdy * pOrtY2
    length = torch.sqrt(tOrtX ** 2 + tOrtY ** 2)
    oriLoss = sum(residual / length)

    return torch.tensor(0, dtype=float), oriLoss

def GetOriLossNew2(u, v, pOrtX1, pOrtY1, pX2, pY2, pOrtX2, pOrtY2, flag):
    """Orientation loss used by GetTransFunUV.

    Pushes the source orientation (pOrtX2, pOrtY2) through the Jacobian of
    the mapping (u, v) and measures, per point, the negative normalized dot
    product with the target orientation (pOrtX1, pOrtY1) — i.e. -cos(angle),
    assuming the target orientations are unit length (TODO confirm against
    callers).  All arguments are (n, 1) tensors; pX2/pY2 must require grad.

    flag selects the reduction: 0 -> mean over points, nonzero -> worst
    (maximum) point only.  Returns (zero tensor, reduced loss); the first
    element is a placeholder kept for interface compatibility with
    GetOriLoss / GetOriLossNew.
    """
    xy2 = torch.cat([pX2, pY2], dim=1)
    xOut = u(xy2)
    yOut = v(xy2)
    # First-order partials of the mapped coordinates (Jacobian entries).
    udx = gradients(xOut, pX2, 1)
    udy = gradients(xOut, pY2, 1)
    vdx = gradients(yOut, pX2, 1)
    vdy = gradients(yOut, pY2, 1)

    #oriLoss = pA * vdx + pB * vdy - pC * udx - pD * udy
    # Transformed source orientation and its Euclidean length.
    pTOrtX2 = udx * pOrtX2 + udy * pOrtY2
    pTOrtY2 = vdx * pOrtX2 + vdy * pOrtY2
    norm = pTOrtX2 ** 2 + pTOrtY2 ** 2
    norm = torch.sqrt(norm)
    oriLoss = -pOrtX1*pTOrtX2 - pOrtY1*pTOrtY2
    oriLoss = oriLoss/norm
    # If even the worst point has cosine > 0.95, drop back to the mean
    # reduction.  NOTE(review): `flag` is only rebound locally here — the
    # caller's flag variable is NOT updated, so this switch lasts for the
    # current call only; confirm that is intended.
    if -0.95 > max(oriLoss):
        flag = 0
    if 0 == flag:
        num = pX2.size(0)
        oriLoss = sum(oriLoss)/num
    else:
        # Worst-point reduction (builtin max over the (n, 1) rows).
        oriLoss = max(oriLoss)

    return torch.tensor(0, dtype=float), oriLoss

def SamplePoints(xMin, xMax, yMin, yMax, n=1000):
    """Draw n uniform random points over the box, padded by 50 on each side.

    Returns (x, y), each an (n, 1) tensor with requires_grad enabled so the
    smoothness loss can differentiate the network through them.
    """
    pad = 100  # total extra extent; half is added on each side of the box
    spanX = xMax - xMin + pad
    spanY = yMax - yMin + pad
    xs = xMin - pad / 2 + torch.rand(n, 1) * spanX
    ys = yMin - pad / 2 + torch.rand(n, 1) * spanY
    return xs.requires_grad_(True), ys.requires_grad_(True)

def GetSmoothLoss(u, v, pX2, pY2, device):
    """Thin-plate-style bending energy of the mapping (u, v).

    Estimated by Monte Carlo on random points around the bounding box of
    (pX2, pY2): mean of uxx² + uyy² + 2uxy² + vxx² + vyy² + 2vxy².
    """
    num = 1000
    x, y = SamplePoints(
        min(pX2).to('cpu'), max(pX2).to('cpu'),
        min(pY2).to('cpu'), max(pY2).to('cpu'),
        n=num,
    )
    x = x.to(device)
    y = y.to(device)
    coords = torch.cat([x, y], dim=1)
    mappedX = u(coords)
    mappedY = v(coords)

    # Second derivatives of both coordinate functions.
    udxx = gradients(mappedX, x, 2)
    udyy = gradients(mappedX, y, 2)
    udxy = gradients(gradients(mappedX, x, 1), y, 1)
    vdxx = gradients(mappedY, x, 2)
    vdyy = gradients(mappedY, y, 2)
    vdxy = gradients(gradients(mappedY, x, 1), y, 1)

    energy = (udxx ** 2 + udyy ** 2 + 2 * udxy ** 2
              + vdxx ** 2 + vdyy ** 2 + 2 * vdxy ** 2)
    return sum(energy) / num

def GetTransPoint(u, v, x, y, pOrtX, pOrtY):
    """Map points (x, y) through (u, v); optionally map orientations too.

    Returns [tx, ty] as 1-D tensors; when pOrtX is given, appends the
    unit-normalized transformed orientation components as two more 1-D
    tensors.  x/y must require grad when orientations are requested.
    """
    coords = torch.cat([x, y], dim=1)
    mappedX = u(coords)
    mappedY = v(coords)
    result = [mappedX.squeeze(1), mappedY.squeeze(1)]

    if pOrtX is None:
        return result

    # Push the orientation vectors through the Jacobian of the mapping.
    udx = gradients(mappedX, x, 1)
    udy = gradients(mappedX, y, 1)
    vdx = gradients(mappedY, x, 1)
    vdy = gradients(mappedY, y, 1)

    tOrtX = udx * pOrtX + udy * pOrtY
    tOrtY = vdx * pOrtX + vdy * pOrtY
    length = torch.sqrt(tOrtX ** 2 + tOrtY ** 2)
    result.append((tOrtX / length).squeeze(1))
    result.append((tOrtY / length).squeeze(1))
    return result

def GetTransFunUV(pX1, pY1, pOrtX1, pOrtY1, pX2, pY2, pOrtX2, pOrtY2, lambda3, device):
    """Train MLPs u, v so that (x, y) -> (u(x,y), v(x,y)) maps the source
    landmarks (pX2, pY2) onto the targets (pX1, pY1), aligns the point
    orientations, and stays smooth.

    pX*/pY* are (n, 1) tensors; pOrt* are 1-D orientation components
    (unsqueezed before use).  lambda3 weights the smoothness term; device
    is the torch device to train on.  Returns the best (u, v) snapshot
    found during training.
    """
    u = MLP().to(device)
    v = MLP().to(device)

    pX1 = pX1.to(device)
    pY1 = pY1.to(device)
    pOrtX1 = pOrtX1.to(device)
    pOrtY1 = pOrtY1.to(device)
    pX2 = pX2.to(device)
    pY2 = pY2.to(device)
    pOrtX2 = pOrtX2.to(device)
    pOrtY2 = pOrtY2.to(device)

    l = 0.001  # learning rate shared by both optimizers
    optU = torch.optim.Adam(params=u.parameters(), lr=l)#weight_decay=0.01
    optV = torch.optim.Adam(params=v.parameters(), lr=l)#weight_decay=0.01
    pointNum = pX2.size(0)
    # Best-so-far tracking; the -1 sentinels are replaced at iteration 0.
    minXYLoss = -1
    minOriLoss = -1
    minSmoothLoss = -1
    conNum = 0  # consecutive iterations where both losses got worse
    flag = 0    # orientation-loss reduction: 0 = mean, 1 = worst point
    for i in range(10000):
        if 5000 <= i:
            # After warm-up, penalize the worst-aligned point instead of the mean.
            flag = 1
        optU.zero_grad()
        optV.zero_grad()
        xyLoss = GetXYLoss(u, v, pX1, pY1, pX2, pY2)
        # oriLoss1, oriLoss2 = GetOriLoss(u, v, pX2, pY2, pABCD)  # same as the classic TPS loss; results were poor
        # oriLoss1, oriLoss2 = GetOriLossNew(u, v, pX2, pY2, pOrtX2.unsqueeze(1), pOrtY2.unsqueeze(1), pABCD)  # behaves the same as GetOriLossNew2; no longer used
        oriLoss1, oriLoss2 = GetOriLossNew2(u, v, pOrtX1.unsqueeze(1), pOrtY1.unsqueeze(1), pX2, pY2,
                                            pOrtX2.unsqueeze(1), pOrtY2.unsqueeze(1), flag)
        smoothLoss = GetSmoothLoss(u, v, pX2, pY2, device)

        with torch.no_grad():
            # oriLoss2 appears to be a negative-cosine score, so oriError
            # shifts it to be ~0 when perfectly aligned — TODO confirm.
            oriError = 1 + oriLoss2
            # Adaptive orientation weight: effectively max(99, oriError / xyLoss).
            initLambd2 = 99
            lambda2 = initLambd2
            if 0 < xyLoss:
                lambda2 = oriError / xyLoss
            if initLambd2 > lambda2:
                lambda2 = initLambd2
            # lambda2 = 1
        loss = xyLoss + lambda2 * oriLoss2 + lambda3 * smoothLoss  # 9999
        # print("xyLoss:", xyLoss.data, "oriLoss1:", oriLoss1.data, "oriLoss2:", oriLoss2.data, "smoothLoss:", smoothLoss.data)
        print(i, ":", "lambda2:", lambda2, "xyLoss:", xyLoss.data, "oriLoss2:", oriLoss2.data, "smoothLoss:", smoothLoss.data, "lr:", optU.param_groups[0]['lr'])
        # Early stop: landmarks match closely and orientations nearly aligned.
        if 1 > xyLoss and 0.1 > oriError:
            uBak = copy.deepcopy(u)
            vBak = copy.deepcopy(v)
            minXYLoss = xyLoss
            minOriLoss = oriLoss2
            break

        loss.backward()
        optU.step()
        optV.step()
        # Snapshot when both losses improve, or xyLoss improves by a large
        # margin.  The i == 0 branch guarantees uBak/vBak are always defined.
        # if 0 == i or (xyLoss < minXYLoss and oriLoss2 < minOriLoss):
        if 0 == i or (xyLoss < minXYLoss and oriLoss2 < minOriLoss) or (xyLoss + 3 < minXYLoss):
            uBak = copy.deepcopy(u)
            vBak = copy.deepcopy(v)
            minXYLoss = xyLoss
            minOriLoss = oriLoss2
            minSmoothLoss = smoothLoss
        if xyLoss > minXYLoss and oriLoss2 > minOriLoss:
            conNum = conNum + 1
        else:
            conNum = 0
        # After 10 stagnant iterations, restore the best snapshot at the base
        # learning rate.  NOTE(review): `l` is never raised above 0.001, so
        # this inner branch is unreachable as written — confirm intent.
        if 10 == conNum:
            if l > 0.001:
                l = 0.001
                u = copy.deepcopy(uBak)
                v = copy.deepcopy(vBak)
                optU = torch.optim.Adam(params=u.parameters(), lr=l)
                optV = torch.optim.Adam(params=v.parameters(), lr=l)
                conNum = 0
    # NOTE(review): the label says "minSmoothLoss" but the last smoothLoss
    # is printed — looks like a typo; left unchanged here.
    print("minXYLoss:", minXYLoss, "minOriLoss:", minOriLoss, "minSmoothLoss:", smoothLoss)
    u = copy.deepcopy(uBak) #.to('cpu')
    v = copy.deepcopy(vBak) #.to('cpu')
    return u, v