import argparse
import os

import numpy as np
import scipy.linalg
import torch
import torchvision.transforms
from PIL import Image
import torchvision.transforms as T
# import util
# generate training batch
import yaml
import matplotlib.pyplot as plt

def genPerturbations(opt, batchsize):
    """Generate a batch of random warp perturbation parameter vectors.

    Random displacements are drawn for the four corners of the canonical
    [-1, 1]^2 square (per-corner jitter plus a shared per-sample
    translation), and warp parameters of the configured family are fitted
    to those displacements.

    Args:
        opt: config dict; reads opt['WARP_CONFIG'] ('pertScale',
            'transScale', 'type') and opt['MODEL_CONFIG']['DEVICE'].
        batchsize: number of perturbation vectors to generate.

    Returns:
        torch.Tensor of shape [batchsize, p] on the configured device,
        where p is the warp's DoF (2 translation / 4 similarity /
        6 affine / 8 homography).
    """
    canon4pts = np.array([[-1, -1], [-1, 1], [1, 1], [1, -1]], dtype=np.float32)
    pertScale = opt['WARP_CONFIG']['pertScale']
    transScale = opt['WARP_CONFIG']['transScale']
    warptype = opt['WARP_CONFIG']['type']
    X = np.tile(canon4pts[:, 0], [batchsize, 1])
    Y = np.tile(canon4pts[:, 1], [batchsize, 1])
    O = np.zeros([batchsize, 4], dtype=np.float32)
    I = np.ones([batchsize, 4], dtype=np.float32)
    # per-corner jitter (pertScale) plus one shared translation per sample
    dX = (np.random.randn(batchsize, 4) * pertScale
          + np.random.randn(batchsize, 1) * transScale)
    dY = (np.random.randn(batchsize, 4) * pertScale
          + np.random.randn(batchsize, 1) * transScale)
    dX, dY = dX.astype(np.float32), dY.astype(np.float32)
    # fit warp parameters to generated displacements
    if "homography" in warptype:
        # exact 8-DoF solve mapping canonical corners to perturbed corners
        A = np.concatenate([np.stack([X, Y, I, O, O, O, -X * (X + dX), -Y * (X + dX)], axis=-1),
                            np.stack([O, O, O, X, Y, I, -X * (Y + dY), -Y * (Y + dY)], axis=-1)], axis=1)
        b = np.expand_dims(np.concatenate([X + dX, Y + dY], axis=1), axis=-1)
        # squeeze ONLY the trailing singleton: a bare .squeeze() would also
        # drop the batch dimension when batchsize == 1
        pPert = np.matmul(np.linalg.inv(A), b).squeeze(axis=-1)
        # subtract the identity homography so p == 0 means "no warp"
        pPert -= np.array([1, 0, 0, 0, 1, 0, 0, 0], dtype=np.float32)
    else:
        if "translation" in warptype:
            J = np.concatenate([np.stack([I, O], axis=-1),
                                np.stack([O, I], axis=-1)], axis=1)
        if "similarity" in warptype:
            J = np.concatenate([np.stack([X, Y, I, O], axis=-1),
                                np.stack([-Y, X, O, I], axis=-1)], axis=1)
        if "affine" in warptype:
            J = np.concatenate([np.stack([X, Y, I, O, O, O], axis=-1),
                                np.stack([O, O, O, X, Y, I], axis=-1)], axis=1)
        dXY = np.expand_dims(np.concatenate([dX, dY], axis=1), axis=-1)
        Jtransp = np.transpose(J, axes=[0, 2, 1])
        # normal-equations least squares: p = (J^T J)^-1 J^T dXY;
        # squeeze only the trailing dim so batchsize == 1 keeps its batch axis
        pPert = np.matmul(np.linalg.inv(np.matmul(Jtransp, J)),
                          np.matmul(Jtransp, dXY)).squeeze(axis=-1)
    pInit = torch.from_numpy(pPert).to(opt['MODEL_CONFIG']['DEVICE'])
    return pInit


# fit (affine) warp between two sets of points
def fit(Xsrc, Xdst):
    """Least-squares fit of an affine warp mapping Xsrc onto Xdst.

    Args:
        Xsrc: [N, 2] array of source (x, y) points.
        Xdst: [N, 2] array of corresponding destination (x, y) points.

    Returns:
        np.ndarray [3, 3] float32 homogeneous affine matrix M with
        M @ [x, y, 1]^T ~= [u, v, 1]^T.
    """
    ptsN = len(Xsrc)
    X, Y, U, V = Xsrc[:, 0], Xsrc[:, 1], Xdst[:, 0], Xdst[:, 1]
    O, I = np.zeros([ptsN]), np.ones([ptsN])
    # stack the x- and y-equations into one 2N x 6 system
    A = np.concatenate((np.stack([X, Y, I, O, O, O], axis=1),
                        np.stack([O, O, O, X, Y, I], axis=1)), axis=0)
    b = np.concatenate((U, V), axis=0)
    p1, p2, p3, p4, p5, p6 = scipy.linalg.lstsq(A, b)[0].squeeze()
    # dtype must be a NumPy dtype: the original torch.float32 here raised
    # "TypeError: Cannot interpret 'torch.float32' as a data type"
    pMtrx = np.array([[p1, p2, p3], [p4, p5, p6], [0, 0, 1]], dtype=np.float32)
    return pMtrx


# compute composition of warp parameters
def compose(opt, p, dp):
    """Return the parameters of the warp dp applied after warp p."""
    composed = vec2mtrx(opt, dp).matmul(vec2mtrx(opt, p))
    # renormalize so the homogeneous scale (bottom-right entry) is 1
    # before converting back to a parameter vector
    composed = composed / composed[:, 2:3, 2:3]
    return mtrx2vec(opt, composed)


# compute inverse of warp parameters
def inverse(opt, p):
    """Return the parameters of the warp that undoes p."""
    invMtrx = vec2mtrx(opt, p).inverse()
    return mtrx2vec(opt, invMtrx)


# convert warp parameters to matrix
def vec2mtrx(opt, p):
    """Convert warp parameter vectors into 3x3 homogeneous matrices.

    Args:
        opt: config dict; reads opt['WARP_CONFIG']['type'] and
            opt['MODEL_CONFIG']['DEVICE'].
        p: [batchsize, dof] parameter tensor (dof matches the warp type).

    Returns:
        [batchsize, 3, 3] tensor of warp matrices; p == 0 gives identity.
    """
    device = opt['MODEL_CONFIG']['DEVICE']
    bs = p.shape[0]
    zero = torch.zeros(bs, dtype=torch.float32).to(device)
    one = torch.ones(bs, dtype=torch.float32).to(device)
    warptype = opt['WARP_CONFIG']['type']

    def build(row0, row1, row2):
        # assemble three per-batch rows into a [bs, 3, 3] matrix
        return torch.stack([torch.stack(row0, dim=-1),
                            torch.stack(row1, dim=-1),
                            torch.stack(row2, dim=-1)], dim=1)

    # NOTE: cascading `if`s (not elif) mirror the original — if the type
    # string matches several families, the last match wins
    if "translation" in warptype:
        tx, ty = torch.unbind(p, dim=1)
        pMtrx = build([one, zero, tx], [zero, one, ty], [zero, zero, one])
    if "similarity" in warptype:
        pc, ps, tx, ty = torch.unbind(p, dim=1)
        pMtrx = build([one + pc, -ps, tx], [ps, one + pc, ty], [zero, zero, one])
    if "affine" in warptype:
        a1, a2, a3, a4, a5, a6 = torch.unbind(p, dim=1)
        pMtrx = build([one + a1, a2, a3], [a4, one + a5, a6], [zero, zero, one])
    if "homography" in warptype:
        h1, h2, h3, h4, h5, h6, h7, h8 = torch.unbind(p, dim=1)
        pMtrx = build([one + h1, h2, h3], [h4, one + h5, h6], [h7, h8, one])
    return pMtrx


# convert warp matrix to parameters
def mtrx2vec(opt, pMtrx):
    """Extract warp parameter vectors from 3x3 homogeneous matrices.

    Inverse of vec2mtrx: the identity matrix maps to the zero vector.

    Args:
        opt: config dict; reads opt['WARP_CONFIG']['type'].
        pMtrx: [batchsize, 3, 3] tensor of warp matrices.

    Returns:
        [batchsize, dof] parameter tensor for the configured warp type.
    """
    row0, row1, row2 = torch.unbind(pMtrx, dim=1)
    e00, e01, e02 = torch.unbind(row0, dim=1)
    e10, e11, e12 = torch.unbind(row1, dim=1)
    e20, e21, e22 = torch.unbind(row2, dim=1)
    warptype = opt['WARP_CONFIG']['type']
    # cascading `if`s (not elif) mirror vec2mtrx: last matching family wins
    if "translation" in warptype:
        p = torch.stack([e02, e12], dim=1)
    if "similarity" in warptype:
        p = torch.stack([e00 - 1, e10, e02, e12], dim=1)
    if "affine" in warptype:
        p = torch.stack([e00 - 1, e01, e02, e10, e11 - 1, e12], dim=1)
    if "homography" in warptype:
        p = torch.stack([e00 - 1, e01, e02, e10, e11 - 1, e12, e20, e21], dim=1)
    return p


# warp the image
def transformImage(opt, image, pMtrx):
    """Warp a batch of images by homogeneous warp matrices.

    Builds the canonical [-1, 1]^2 sampling grid from the image's own
    height and width (the original used opt['MODEL_CONFIG']['IMAGE_SIZE'],
    which crashed on any image whose H*W differed from IMAGE_SIZE**2 and
    silently assumed square inputs), warps the grid by pMtrx, and samples
    with bilinear interpolation.

    Args:
        opt: config dict; reads opt['MODEL_CONFIG']['DEVICE'].
        image: [bs, C, H, W] tensor.
        pMtrx: [bs, 3, 3] homogeneous warp matrices (identity = no warp).

    Returns:
        [bs, C, H, W] warped image tensor.
    """
    bs = image.shape[0]
    H = image.shape[2]
    W = image.shape[3]
    device = opt['MODEL_CONFIG']['DEVICE']
    # reference frame is identity; kept as an explicit hook for composing
    # an extra fixed reference warp in front of pMtrx
    refMtrx = torch.eye(3, dtype=torch.float32).to(device)
    refMtrx = refMtrx.repeat(bs, 1, 1)
    transMtrx = refMtrx.matmul(pMtrx)
    # warp the canonical coordinates, sized from the actual image
    X, Y = np.meshgrid(np.linspace(-1, 1, W), np.linspace(-1, 1, H))
    X, Y = X.flatten(), Y.flatten()
    XYhom = np.stack([X, Y, np.ones_like(X)], axis=1).T
    XYhom = np.tile(XYhom, [bs, 1, 1]).astype(np.float32)
    XYhom = torch.from_numpy(XYhom).to(device)
    XYwarpHom = transMtrx.matmul(XYhom)
    XwarpHom, YwarpHom, ZwarpHom = torch.unbind(XYwarpHom, dim=1)
    # homogeneous divide; epsilon guards against division by zero
    Xwarp = (XwarpHom / (ZwarpHom + 1e-8)).reshape(bs, H, W)
    Ywarp = (YwarpHom / (ZwarpHom + 1e-8)).reshape(bs, H, W)
    grid = torch.stack([Xwarp, Ywarp], dim=-1)
    # sampling with bilinear interpolation
    imageWarp = torch.nn.functional.grid_sample(image, grid, mode="bilinear", align_corners=True)
    return imageWarp

if __name__ == '__main__':
    # demo: load a config, generate random warps, and visualize one warped image
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default='../config.yaml', help='input config yaml file')
    params = parser.parse_args()

    # abort early on a missing config: the original printed a warning and
    # then crashed later with NameError on the undefined `config`
    if not os.path.exists(params.config):
        raise SystemExit("Please check your config yaml file")
    # `with` closes the file handle (the original leaked it)
    with open(params.config, 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)

    bs = 2
    pInit = genPerturbations(config, bs)
    pInitMatrix = vec2mtrx(config, pInit)

    # get imgs; convert('RGB') so the 3-channel Normalize matches even when
    # the PNGs carry an alpha channel or are grayscale
    totensor = T.Compose([T.ToTensor(), T.Normalize((.5,) * 3, (.5,) * 3)])
    img1 = torch.unsqueeze(totensor(Image.open('./1.png').convert('RGB')), dim=0)
    img2 = torch.unsqueeze(totensor(Image.open('./2.png').convert('RGB')), dim=0)
    img = torch.cat([img1, img2], dim=0)

    warp_img = transformImage(config, img, pInitMatrix)
    # undo the [-1, 1] normalization for display
    warp_img = warp_img * 0.5 + 0.5
    toPIL = T.ToPILImage()
    plt.imshow(toPIL(warp_img[1]))
    plt.show()