#This program solves the Navier Stokes equation. It uses the method of
#constructed solutions to test for convergence, which should be 2nd order 

#from __future__ import division
import numpy as np
import numpy.fft as fft
from numpy import pi 
from pyfft.cl import Plan
import pyopencl as cl
import pyopencl.array as cl_array
from pyopencl.elementwise import ElementwiseKernel
import time
import sys
from pyvisfile.vtk import write_structured_grid
from pyvisfile.vtk import (
           UnstructuredGrid, DataArray,
           AppendedDataXMLGenerator,
           VTK_VERTEX, VF_LIST_OF_VECTORS, VF_LIST_OF_COMPONENTS)
#np.set_printoptions(threshold=sys.maxint)
from warnings import filterwarnings
filterwarnings("ignore", module="pyopencl.cache", lineno=336)
filterwarnings("ignore", module="pyopencl", lineno=163)
# --- Run-time configuration flags ---
TIMING = 0        # enable timing instrumentation (unused in this chunk)
WGPSIZE_1 = 4     # OpenCL work-group size, first dimension
WGPSIZE_2 = 4     # OpenCL work-group size, second dimension
b_WGPSIZE = 1     # work-group size for Lagrangian (boundary) kernels
PR_INT = 0        # when nonzero, compute plotting/vorticity diagnostics each step
SAVE = 0          # save-output flag (unused in this chunk)
DISP_INT = 100    # display interval in time steps (unused in this chunk)
WRITE_NORM = 0    # when nonzero, open a file for writing norms
start_size = 6    # grid sweep: smallest grid is 2**start_size
end_size = 9      # grid sweep: largest grid is 2**end_size

if WRITE_NORM:
    f = open('/home/tfai/workfile2', 'w')
#f2 = open('/home/tfai/workfile2', 'w')

def calc_transf(dx):
    """Return the N x N symbol of the 5-point discrete Laplacian in Fourier space.

    N is inferred as round(1/dx).  Wavenumbers follow the standard FFT layout
    (0..N/2, then -N/2+1..-1).  The (0,0) entry is set to 1 so that later
    divisions by the symbol (pressure solve) never divide by zero; the zero
    mode is handled separately by the L_adj_* kernels.

    Parameters:
        dx: grid spacing of the periodic unit square.
    Returns:
        (N, N) float array of Laplacian eigenvalues.
    """
    # np.int was removed in NumPy 1.24 -- use the builtin int instead.
    N = int(np.round(1 / dx, 0))
    # Explicit floor division keeps Python-2 semantics of N/2 under Python 3.
    k = np.arange(0, N // 2 + 1)
    k = np.hstack([k, np.arange(-N // 2 + 1, 0)])
    k1 = np.tile(k, (N, 1))
    k2 = k1.T.copy()
    L_tf = -4. / dx**2 * (np.sin(pi * k1 / N)**2 + np.sin(pi * k2 / N)**2)
    L_tf[0, 0] = 1.  # avoid dividing by zero
    return L_tf

def calc_mu():
    """Rebuild the staggered viscosity-perturbation fields and upload to the GPU.

    Uses module globals (mu_0, mu_1, xx_* grids, queue); stores the result in
    the globals mu_1_gpu (left/right faces) and mu_2_gpu (top/bottom faces).
    """
    global mu_1_gpu, mu_2_gpu
    # Smooth cosine bump of amplitude mu_0*mu_1, sampled on the two
    # staggered face grids.
    mu_face_lr = mu_0*mu_1*0.5*(1.0 + np.cos(2*pi*xx_1_stag)*np.cos(2*pi*xx_2_c))
    mu_face_tb = mu_0*mu_1*0.5*(1.0 + np.cos(2*pi*xx_1_c)*np.cos(2*pi*xx_2_stag))
    mu_1_gpu = cl_array.to_device(queue, mu_face_lr.astype(np.float64))
    mu_2_gpu = cl_array.to_device(queue, mu_face_tb.astype(np.float64))

def calc_rho():
    """Rebuild the staggered density-perturbation fields and upload to the GPU.

    Uses module globals (rho_0, rho_1, xx_* grids, queue); stores the result
    in the globals rho_1_gpu (left/right faces) and rho_2_gpu (top/bottom).
    """
    global rho_1_gpu, rho_2_gpu
    # Smooth sine bump of amplitude rho_0*rho_1, sampled on the two
    # staggered face grids.
    rho_face_lr = rho_0*rho_1*0.5*(1.0 + np.sin(2*pi*xx_1_stag)*np.sin(2*pi*xx_2_c))
    rho_face_tb = rho_0*rho_1*0.5*(1.0 + np.sin(2*pi*xx_1_c)*np.sin(2*pi*xx_2_stag))
    rho_1_gpu = cl_array.to_device(queue, rho_face_lr.astype(np.float64))
    rho_2_gpu = cl_array.to_device(queue, rho_face_tb.astype(np.float64))

def calc_inds(N):
    """Return periodic shifted-index arrays for an N x N grid.

    Returns six (N, N) integer arrays: (i_1, i_2) identity indices along each
    axis, (ip_1, ip_2) indices shifted forward by one with wraparound, and
    (im_1, im_2) indices shifted backward by one with wraparound.
    """
    idx = np.arange(N)
    i_1 = np.tile(idx, (N, 1))
    ip_1 = np.tile(np.roll(idx, -1), (N, 1))  # [1, 2, ..., N-1, 0]
    im_1 = np.tile(np.roll(idx, 1), (N, 1))   # [N-1, 0, ..., N-2]
    return i_1, i_1.T.copy(), ip_1, ip_1.T.copy(), im_1, im_1.T.copy()

def solve(t_step):
    """Advance the velocity field one time step.

    Predictor/corrector scheme on GPU buffers (all module globals): builds the
    manufactured forcing, adds variable-viscosity diffusion, (currently
    disabled) skew-symmetric convection and an unsteady density term, projects
    out the pressure with an FFT Poisson solve, then runs n_iter corrector
    iterations.  Results are left in u_1_gpu / u_2_gpu; u_1_o_gpu / u_2_o_gpu
    hold the previous step's velocity.
    """
    global dx, dt, mu_0, rho_0, b_q, dtheta
    N = np.int(np.round(1/dx,0))

    ##change mu_hat for predictor
    #mu_hat = 0.5*(mu_max+mu_min)

    #print 'u_1 is', u_1_gpu.get().max()
    #print 'u_2 is', u_2_gpu.get().max()
    # Promote real velocity to complex and take forward FFTs (u_*_tf reused later).
    imag_gpu(u_1_gpu, u_1_tf_gpu)
    imag_gpu(u_2_gpu, u_2_tf_gpu)

    plan.execute(u_1_tf_gpu.data)
    plan.execute(u_2_tf_gpu.data)
    
    zero_gpu(f_1_gpu)
    zero_gpu(f_2_gpu)
    zero_gpu(u_1_tilde_gpu)
    zero_gpu(u_2_tilde_gpu)
    #zero_gpu(mu_1_gpu)
    #zero_gpu(mu_2_gpu)
    #zero_gpu(rho_1_gpu)
    #zero_gpu(rho_2_gpu)

    # Manufactured-solution forcing at the new time level t = dt*t_step.
    calc_f_gpu(dt*t_step, dx, mu_0, mu_1, rho_0, rho_1_tr_gpu, rho_2_tr_gpu, xx_1_stag_gpu, xx_2_stag_gpu, xx_1_c_gpu, xx_2_c_gpu, f_1_gpu, f_2_gpu) 
    #calc_f_gpu(dt*(t_step-1.0), dx, mu_0, mu_1, rho_0, rho_1_tr_gpu, rho_2_tr_gpu, xx_1_stag_gpu, xx_2_stag_gpu, xx_1_c_gpu, xx_2_c_gpu, f_1_gpu, f_2_gpu) 

    #print rho_1_gpu.get().max()
    #check power identity
    #print 'power1 is', (U_1_gpu.get()*Rho_gpu.get()+U_2_gpu.get()*Rho_gpu.get()).sum()*dtheta-(u_1_gpu.get()*rho_1_gpu.get()+u_2_gpu.get()*rho_2_gpu.get()).sum()*dx*dx
    #print 'power2 is', (U_1_gpu.get()*F_1_gpu.get()+U_2_gpu.get()*F_2_gpu.get()).sum()*dtheta-(u_1_gpu.get()*u_1_tilde_gpu.get()+u_2_gpu.get()*u_2_tilde_gpu.get()).sum()*dx*dx
    #check momentum conservation
    #print 'mom1 is', (u_1_gpu.get()*(rho_1_gpu.get()+rho_0)).sum()*dx*dx
    #print 'mom2 is', (u_2_gpu.get()*(rho_2_gpu.get()+rho_0)).sum()*dx*dx
    #print 'mom1 is', u_1_gpu.get().sum()*dx*dx
    #print 'mom2 is', u_2_gpu.get().sum()*dx*dx
    if PR_INT:
        # Diagnostics for plotting: the mu_*_plot buffers are reused as scratch.
        copy_gpu(mu_1_gpu, mu_1_plot_gpu)
        copy_gpu(mu_2_gpu, mu_2_plot_gpu)
        vort_kernel(queue, (N,N), (WGPSIZE_1, WGPSIZE_2), u_1_gpu.data, u_2_gpu.data, dx, N, vort_gpu.data)

    add_gpu(f_1_gpu, u_1_tilde_gpu, 1, f_1_gpu) 
    add_gpu(f_2_gpu, u_2_tilde_gpu, 1, f_2_gpu) 

    #calc_mu()
    # Variable-viscosity Laplacian of the current velocity.
    vc_laplace_kernel(queue, (N,N), (WGPSIZE_1, WGPSIZE_2), u_1_gpu.data, u_2_gpu.data, mu_1_gpu.data, mu_2_gpu.data, dx, N, u_1_tilde_gpu.data, u_2_tilde_gpu.data) 

    add_gpu(f_1_gpu, u_1_tilde_gpu, 1.0, f_1_gpu) 
    add_gpu(f_2_gpu, u_2_tilde_gpu, 1.0, f_2_gpu) 

    # Skew-symmetric convection term, immediately zeroed: convection disabled.
    ss_conv_kernel(queue, (N,N), (WGPSIZE_1, WGPSIZE_2), u_1_gpu.data, u_2_gpu.data, dx, N, u_1_tilde_gpu.data, u_2_tilde_gpu.data)
    zero_gpu(u_1_tilde_gpu) #turn off convection
    zero_gpu(u_2_tilde_gpu)

    add_gpu(f_1_gpu, u_1_tilde_gpu, -rho_hat, f_1_gpu)
    add_gpu(f_2_gpu, u_2_tilde_gpu, -rho_hat, f_2_gpu)

    #calc_rho()
    # Temporarily shift rho perturbation to total density for the dal term,
    # then shift back.
    add_scalar_gpu(rho_1_gpu, rho_0-rho_hat, rho_1_gpu)
    add_scalar_gpu(rho_2_gpu, rho_0-rho_hat, rho_2_gpu)
    dal_term_kernel(queue, (N,N), (WGPSIZE_1, WGPSIZE_2), u_1_gpu.data, u_2_gpu.data, u_1_o_gpu.data, u_2_o_gpu.data, u_1_tilde_gpu.data, u_2_tilde_gpu.data, rho_1_gpu.data, rho_2_gpu.data, dt, N, dp_1_gpu.data, dp_2_gpu.data)
    add_scalar_gpu(rho_1_gpu, -rho_0+rho_hat, rho_1_gpu)
    add_scalar_gpu(rho_2_gpu, -rho_0+rho_hat, rho_2_gpu)

    # Save current velocity as the "old" level for the corrector.
    copy_gpu(u_1_gpu, u_1_o_gpu)
    copy_gpu(u_2_gpu, u_2_o_gpu)

    copy_gpu(f_1_gpu, u_1_tilde_gpu)
    copy_gpu(f_2_gpu, u_2_tilde_gpu)

    add_gpu(f_1_gpu, dp_1_gpu, -1.0, f_1_gpu)
    add_gpu(f_2_gpu, dp_2_gpu, -1.0, f_2_gpu)
    ##########################################################solve for u_tilde
    # Pressure projection: solve L p = div(f) in Fourier space, subtract grad p.
    div_kernel(queue, (N,N), (WGPSIZE_1,WGPSIZE_2), f_1_gpu.data, f_2_gpu.data, dx, N, p_gpu.data)
    imag_gpu(p_gpu, p_tf_gpu)
    plan.execute(p_tf_gpu.data)

    L_adj_2_kernel(queue, (1,1), (1,1), L_tf_gpu.data)
    calc_p_gpu(p_tf_gpu, L_tf_gpu, p_tf_gpu)
    p_filt_kernel(queue, (1,1), (1,1), p_tf_gpu.data)
    plan.execute(p_tf_gpu.data, inverse=True)
    real_gpu(p_tf_gpu, p_gpu)
    #if np.sum(np.abs(np.imag(p_gpu.get()))) > 1e-3:
    #    print 'imag part of p too big'

    grad_kernel(queue, (N,N), (WGPSIZE_1,WGPSIZE_2), p_gpu.data, dx, N, dp_1_gpu.data, dp_2_gpu.data)

    add_gpu(f_1_gpu, dp_1_gpu, -1, f_1_gpu)
    add_gpu(f_2_gpu, dp_2_gpu, -1, f_2_gpu)

    # Implicit Helmholtz solve for the predictor velocity in Fourier space.
    imag_gpu(f_1_gpu, f_1_tf_gpu)
    imag_gpu(f_2_gpu, f_2_tf_gpu)

    plan.execute(f_1_tf_gpu.data)
    plan.execute(f_2_tf_gpu.data)

    L_adj_1_kernel(queue, (1,1), (1,1), L_tf_gpu.data)
    solve_kernel(queue, (N,N), (WGPSIZE_1, WGPSIZE_2), u_1_tf_gpu.data, u_2_tf_gpu.data, L_tf_gpu.data, f_1_tf_gpu.data, f_2_tf_gpu.data, dt, rho_hat, mu_0-mu_hat, mu_hat, N, f_1_tf_gpu.data, f_2_tf_gpu.data)

    # Zero out the Nyquist modes before transforming back.
    nyq_filt_kernel(queue, (N,1), (WGPSIZE_1, 1), f_1_tf_gpu.data, N)
    nyq_filt_kernel(queue, (N,1), (WGPSIZE_1, 1), f_2_tf_gpu.data, N)

    zero_gpu(f_1_gpu)
    zero_gpu(f_2_gpu)

    add_gpu(f_1_gpu, u_1_tilde_gpu, 0.5, f_1_gpu)
    add_gpu(f_2_gpu, u_2_tilde_gpu, 0.5, f_2_gpu)

    plan.execute(f_1_tf_gpu.data, inverse=True)
    plan.execute(f_2_tf_gpu.data, inverse=True)
    
    real_gpu(f_1_tf_gpu, u_1_tilde_gpu)
    real_gpu(f_2_tf_gpu, u_2_tilde_gpu)

    #real_gpu(f_1_tf_gpu, u_1_gpu)
    #real_gpu(f_2_tf_gpu, u_2_gpu)
    #return

    #print 'u_1 is', u_1_tilde_gpu.get().max()
    #print 'u_2 is', u_2_tilde_gpu.get().max()
    ############################################################

    ############################################################solve for u_soln
    ##change mu_hat for corrector
    #mu_hat = mu_min + mu_max

    #zero_gpu(mu_1_gpu)
    #zero_gpu(mu_2_gpu)
    #zero_gpu(rho_1_gpu)
    #zero_gpu(rho_2_gpu)

    # mu_*_plot buffers hold the frozen half of the corrector right-hand side.
    copy_gpu(f_1_gpu, mu_1_plot_gpu)
    copy_gpu(f_2_gpu, mu_2_plot_gpu)

    # Corrector loop: Crank-Nicolson-style averaging between old and predicted
    # velocity, iterated n_iter times.
    for iter in range(0,n_iter): 
        # Forcing at the old time level t = dt*(t_step-1).
        calc_f_gpu(dt*(t_step-1.0), dx, mu_0, mu_1, rho_0, rho_1_tr_gpu, rho_2_tr_gpu, xx_1_stag_gpu, xx_2_stag_gpu, xx_1_c_gpu, xx_2_c_gpu, u_1_gpu, u_2_gpu) 
        #calc_f_gpu(dt*t_step, dx, mu_0, mu_1, rho_0, rho_1_tr_gpu, rho_2_tr_gpu, xx_1_stag_gpu, xx_2_stag_gpu, xx_1_c_gpu, xx_2_c_gpu, u_1_gpu, u_2_gpu) 

        add_gpu(mu_1_plot_gpu, u_1_gpu, 0.5, f_1_gpu)
        add_gpu(mu_2_plot_gpu, u_2_gpu, 0.5, f_2_gpu)

        #calc_mu()
        # Shift mu perturbation to total viscosity around the latest iterate.
        add_scalar_gpu(mu_1_gpu, mu_0-mu_hat, mu_1_gpu)
        add_scalar_gpu(mu_2_gpu, mu_0-mu_hat, mu_2_gpu)
        vc_laplace_kernel(queue, (N,N), (WGPSIZE_1, WGPSIZE_2), u_1_tilde_gpu.data, u_2_tilde_gpu.data, mu_1_gpu.data, mu_2_gpu.data, dx, N, u_1_gpu.data, u_2_gpu.data) 
        add_scalar_gpu(mu_1_gpu, -mu_0+mu_hat, mu_1_gpu)
        add_scalar_gpu(mu_2_gpu, -mu_0+mu_hat, mu_2_gpu)

        add_gpu(f_1_gpu, u_1_gpu, 0.5, f_1_gpu)
        add_gpu(f_2_gpu, u_2_gpu, 0.5, f_2_gpu)

        # Convection on the predicted velocity, again disabled.
        ss_conv_kernel(queue, (N,N), (WGPSIZE_1, WGPSIZE_2), u_1_tilde_gpu.data, u_2_tilde_gpu.data, dx, N, u_1_gpu.data, u_2_gpu.data)
        zero_gpu(u_1_gpu) #turn off convection
        zero_gpu(u_2_gpu)

        add_gpu(f_1_gpu, u_1_gpu, -rho_hat*0.5, f_1_gpu)
        add_gpu(f_2_gpu, u_2_gpu, -rho_hat*0.5, f_2_gpu)

        # Convection on the old velocity, also disabled.
        ss_conv_kernel(queue, (N,N), (WGPSIZE_1, WGPSIZE_2), u_1_o_gpu.data, u_2_o_gpu.data, dx, N, dp_1_gpu.data, dp_2_gpu.data)
        zero_gpu(dp_1_gpu) #turn off convection
        zero_gpu(dp_2_gpu)

        add_gpu(u_1_gpu, dp_1_gpu, 1.0, u_1_gpu)
        add_gpu(u_2_gpu, dp_2_gpu, 1.0, u_2_gpu)

        scale_gpu(u_1_gpu, 0.5, u_1_gpu)
        scale_gpu(u_2_gpu, 0.5, u_2_gpu)

        #calc_rho()
        add_scalar_gpu(rho_1_gpu, rho_0-rho_hat, rho_1_gpu)
        add_scalar_gpu(rho_2_gpu, rho_0-rho_hat, rho_2_gpu)
        dal_term_kernel(queue, (N,N), (WGPSIZE_1, WGPSIZE_2), u_1_tilde_gpu.data, u_2_tilde_gpu.data, u_1_o_gpu.data, u_2_o_gpu.data, u_1_gpu.data, u_2_gpu.data, rho_1_gpu.data, rho_2_gpu.data, dt, N, u_1_gpu.data, u_2_gpu.data)
        add_scalar_gpu(rho_1_gpu, -rho_0+rho_hat, rho_1_gpu)
        add_scalar_gpu(rho_2_gpu, -rho_0+rho_hat, rho_2_gpu)

        add_gpu(f_1_gpu, u_1_gpu, -1.0, f_1_gpu)
        add_gpu(f_2_gpu, u_2_gpu, -1.0, f_2_gpu)

        # Same FFT pressure projection as in the predictor.
        div_kernel(queue, (N,N), (WGPSIZE_1,WGPSIZE_2), f_1_gpu.data, f_2_gpu.data, dx, N, p_gpu.data)
        imag_gpu(p_gpu, p_tf_gpu)
        plan.execute(p_tf_gpu.data)

        L_adj_2_kernel(queue, (1,1), (1,1), L_tf_gpu.data)
        calc_p_gpu(p_tf_gpu, L_tf_gpu, p_tf_gpu)
        p_filt_kernel(queue, (1,1), (1,1), p_tf_gpu.data)
        plan.execute(p_tf_gpu.data, inverse=True)
        real_gpu(p_tf_gpu, p_gpu)
        #if np.sum(np.abs(np.imag(p_gpu.get()))) > 1e-3:
        #    print 'imag part of p too big'

        grad_kernel(queue, (N,N), (WGPSIZE_1,WGPSIZE_2), p_gpu.data, dx, N, dp_1_gpu.data, dp_2_gpu.data)

        add_gpu(f_1_gpu, dp_1_gpu, -1, f_1_gpu)
        add_gpu(f_2_gpu, dp_2_gpu, -1, f_2_gpu)

        imag_gpu(f_1_gpu, f_1_tf_gpu)
        imag_gpu(f_2_gpu, f_2_tf_gpu)

        plan.execute(f_1_tf_gpu.data)
        plan.execute(f_2_tf_gpu.data)

        # Helmholtz solve with half viscosities (Crank-Nicolson averaging).
        L_adj_1_kernel(queue, (1,1), (1,1), L_tf_gpu.data)
        solve_kernel(queue, (N,N), (WGPSIZE_1, WGPSIZE_2), u_1_tf_gpu.data, u_2_tf_gpu.data, L_tf_gpu.data, f_1_tf_gpu.data, f_2_tf_gpu.data, dt, rho_hat, mu_0/2.0, mu_hat/2.0, N, f_1_tf_gpu.data, f_2_tf_gpu.data)

        nyq_filt_kernel(queue, (N,1), (WGPSIZE_1, 1), f_1_tf_gpu.data, N)
        nyq_filt_kernel(queue, (N,1), (WGPSIZE_1, 1), f_2_tf_gpu.data, N)

        plan.execute(f_1_tf_gpu.data, inverse=True)
        plan.execute(f_2_tf_gpu.data, inverse=True)

        real_gpu(f_1_tf_gpu, u_1_tilde_gpu)
        real_gpu(f_2_tf_gpu, u_2_tilde_gpu)

    # Final iterate becomes the new velocity.
    copy_gpu(u_1_tilde_gpu, u_1_gpu)
    copy_gpu(u_2_tilde_gpu, u_2_gpu)
    ##############################################################
    return

def err1(clock_max, u_1, u_2, p, dx=None, dt=None):
    """Compare numerical fields against the exact manufactured solution.

    Rebuilds the staggered (face) and centered grids, evaluates the exact
    velocity and pressure of the constructed solution at time clock_max*dt,
    and returns max-norm and L2-norm errors.

    Parameters:
        clock_max: number of time steps taken (final time is clock_max*dt).
        u_1, u_2: numerical velocity components on the staggered grid.
        p: numerical pressure on the cell-centered grid.
        dx, dt: grid spacing and time step; default to the module-level
            globals for backward compatibility.
    Returns:
        (errinf1, errinf2, err21, err22, errinfp, err2p): max-norm and
        L2-norm errors for u_1, u_2 and p.
    """
    # Fall back to the script-level globals when not passed explicitly.
    if dx is None:
        dx = globals()['dx']
    if dt is None:
        dt = globals()['dt']

    x_1_stag = np.arange(-0.5, 0.5, dx)
    x_2_stag = np.arange(-0.5, 0.5, dx)
    xx_1_stag, xx_2_stag = np.meshgrid(x_1_stag, x_2_stag)

    x_1_c = np.arange(-0.5 + dx/2, 0.5 + dx/2, dx)
    x_2_c = np.arange(-0.5 + dx/2, 0.5 + dx/2, dx)
    xx_1_c, xx_2_c = np.meshgrid(x_1_c, x_2_c)

    # Exact manufactured solution at the final time.
    u_1_ex = 2*np.cos(2*pi*xx_1_stag)*np.sin(4*pi*xx_2_c)*np.sin(clock_max*dt)
    u_2_ex = -np.sin(2*pi*xx_1_c)*np.cos(4*pi*xx_2_stag)*np.sin(clock_max*dt)
    # Pressure is staggered in time by half a step.
    p_ex = np.sin(2*pi*xx_1_c)*np.sin(2*pi*xx_2_c)*np.cos((clock_max-0.5)*dt)

    errinf1 = np.abs(u_1 - u_1_ex).max()
    errinf2 = np.abs(u_2 - u_2_ex).max()
    errinfp = np.abs(p - p_ex).max()
    err21 = np.sqrt(np.sum(np.abs(u_1 - u_1_ex)**2 * dx**2))
    err22 = np.sqrt(np.sum(np.abs(u_2 - u_2_ex)**2 * dx**2))
    err2p = np.sqrt(np.sum(np.abs(p - p_ex)**2 * dx**2))

    return errinf1, errinf2, err21, err22, errinfp, err2p

def err(u_1, u_2, u_1_o, u_2_o, p=None, p_o=None, dx=None):
    """Coarse-fine convergence errors between two grid resolutions.

    Compares a coarse-grid solution (u_1_o, u_2_o; NN_o x NN_o) against a
    fine-grid solution (u_1, u_2; 2*NN_o x 2*NN_o) by averaging adjacent fine
    values onto coarse locations, and returns max-, L1- and L2-norm errors.

    Bug fix: the original body referenced p, p_o, errinfp, err2p and err1p
    without defining them, so any call raised NameError.  The pressure
    comparison is now optional: pass cell-centered fields p (fine) and p_o
    (coarse) to enable it.

    Parameters:
        u_1, u_2: fine-grid velocity components.
        u_1_o, u_2_o: coarse-grid velocity components.
        p, p_o: optional fine/coarse cell-centered pressure fields.
        dx: coarse-grid spacing; defaults to the module-level global.
    Returns:
        (errinf1, errinf2, err11, err12, err21, err22), extended with
        (errinfp, err1p, err2p) when both p and p_o are given.
    """
    if dx is None:
        dx = globals()['dx']

    NN_o = u_1_o.shape[0]
    errinf1 = errinf2 = 0.
    err11 = err12 = 0.
    err21 = err22 = 0.
    errinfp = 0.
    err1p = err2p = 0.
    have_p = p is not None and p_o is not None

    for i in range(NN_o):
        for j in range(NN_o):
            # u_1 lives on vertical faces: average the two fine faces that
            # bracket the coarse face; u_2 analogously on horizontal faces.
            d1 = np.abs(u_1_o[j, i] - 0.5*(u_1[2*j, 2*i] + u_1[2*j+1, 2*i]))
            d2 = np.abs(u_2_o[j, i] - 0.5*(u_2[2*j, 2*i] + u_2[2*j, 2*i+1]))
            errinf1 = max(errinf1, d1)
            errinf2 = max(errinf2, d2)
            err21 += d1**2
            err22 += d2**2
            err11 += d1
            err12 += d2
            if have_p:
                # Pressure is cell-centered: average the four fine cells.
                dp = np.abs(p_o[j, i] - 0.25*(p[2*j, 2*i] + p[2*j+1, 2*i]
                                              + p[2*j, 2*i+1] + p[2*j+1, 2*i+1]))
                errinfp = max(errinfp, dp)
                err2p += dp**2
                err1p += dp

    # Normalize: fine-grid spacing is dx/2, fine-grid cell area (dx/2)**2.
    err21 = (dx/2)*np.sqrt(err21)
    err22 = (dx/2)*np.sqrt(err22)
    err11 = (dx**2/4)*err11
    err12 = (dx**2/4)*err12

    if have_p:
        err2p = (dx/2)*np.sqrt(err2p)
        err1p = (dx**2/4)*err1p
        return errinf1, errinf2, err11, err12, err21, err22, errinfp, err1p, err2p

    return errinf1, errinf2, err11, err12, err21, err22

# --- OpenCL context and elementwise helper kernels ---
# All kernels operate on flat double / double2 (complex) device arrays.
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)

# Multiply a complex field by a real filter mask: f = filt*u.
Nyq_gpu = ElementwiseKernel(ctx,
	"double2 *u, double *filt, double2 *f",
	"f[i] = filt[i]*u[i]",
	"Nyq_gpu",
	preamble="""
        #pragma OPENCL EXTENSION cl_khr_fp64 : enable
	""")

# Pointwise product of two real fields: f = u_1*u_2.
mult_gpu = ElementwiseKernel(ctx,
	"double *u_1, double *u_2, double *f",
	"f[i] = u_1[i]*u_2[i]",
	"mult_gpu",
	preamble="""
        #pragma OPENCL EXTENSION cl_khr_fp64 : enable
	""")

# Scaled addition: f = u_1 + a*u_2.
add_gpu = ElementwiseKernel(ctx,
	"double *u_1, double *u_2, double a, double *f",
	"f[i] = u_1[i]+a*u_2[i]",
	"add_gpu",
	preamble="""
        #pragma OPENCL EXTENSION cl_khr_fp64 : enable
	""")
    
# Add a scalar to every entry: f = u + a.
add_scalar_gpu = ElementwiseKernel(ctx,
	"double *u, double a, double *f",
	"f[i] = u[i]+a",
	"add_scalar_gpu",
	preamble="""
        #pragma OPENCL EXTENSION cl_khr_fp64 : enable
	""")
    
# Scale by a scalar: f = a*u.
scale_gpu = ElementwiseKernel(ctx,
	"double *u, double a, double *f",
	"f[i] = a*u[i]",
	"scale_gpu",
	preamble="""
        #pragma OPENCL EXTENSION cl_khr_fp64 : enable
	""")

# Device-to-device copy: f = u.
copy_gpu = ElementwiseKernel(ctx,
	"double *u, double *f",
	"f[i] = u[i]",
	"copy_gpu",
	preamble="""
        #pragma OPENCL EXTENSION cl_khr_fp64 : enable
	""")

# Extract the real part of a complex field.
real_gpu = ElementwiseKernel(ctx,
	"double2 *x, double *z",
	"z[i] = x[i].x",
	"real_gpu",
	preamble="""
        #pragma OPENCL EXTENSION cl_khr_fp64 : enable
	""")

# Promote a real field to complex with zero imaginary part.
imag_gpu = ElementwiseKernel(ctx,
	"double *x, double2 *z",
	"z[i].x = x[i];"
	"z[i].y = 0",
	"imag_gpu",
	preamble="""
        #pragma OPENCL EXTENSION cl_khr_fp64 : enable
	""")

# Evaluate the manufactured-solution forcing (f_1, f_2) at time t on the
# staggered grids; each component includes the unsteady, pressure-gradient and
# variable-viscosity diffusion contributions of the exact solution.
calc_f_gpu = ElementwiseKernel(ctx,
        "double t, double dx, double mu_0, double mu_1, double rho_0, "
        "double *rho_tilde_lr, double *rho_tilde_tb, "
        "double *xx_1_stag, double *xx_2_stag, "
        "double *xx_1_c, double *xx_2_c, "
        "double *f_1, double *f_2",
        "f_1[i] = (rho_0+rho_tilde_lr[i])*2*cos(2*pi*xx_1_stag[i])*sin(4*pi*xx_2_c[i])*cos(t)"
           #"-(rho_0+rho_tilde_lr[i])*4*pi*sin(t)*sin(t)*sin(4*pi*xx_1_stag[i])"
           "+2*pi*cos(2*pi*xx_1_stag[i])*sin(2*pi*xx_2_c[i])*cos(t)"
           "+mu_0*(1.0+0.5*mu_1)*40*pi*pi*cos(2*pi*xx_1_stag[i])*sin(4*pi*xx_2_c[i])*sin(t)"
           "+16*pi*pi*0.5*mu_0*mu_1*cos(4*pi*xx_1_stag[i])*cos(2*pi*xx_2_c[i])*sin(4*pi*xx_2_c[i])*sin(t)"
           "+0.5*mu_0*mu_1*12*pi*pi*cos(2*pi*xx_1_stag[i])*cos(2*pi*xx_1_stag[i])"
           "*sin(t)*(sin(2*pi*xx_2_c[i])*cos(4*pi*xx_2_c[i])"
           "+2*cos(2*pi*xx_2_c[i])*sin(4*pi*xx_2_c[i]));"
        "f_2[i] = -(rho_0+rho_tilde_tb[i])*sin(2*pi*xx_1_c[i])*cos(4*pi*xx_2_stag[i])*cos(t)"
           #"-(rho_0+rho_tilde_tb[i])*2*pi*sin(t)*sin(t)*sin(8*pi*xx_2_stag[i])"
           "+2*pi*sin(2*pi*xx_1_c[i])*cos(2*pi*xx_2_stag[i])*cos(t)"
           "-mu_0*(1.0+0.5*mu_1)*20*pi*pi*sin(2*pi*xx_1_c[i])*cos(4*pi*xx_2_stag[i])*sin(t)"
           "+12*pi*pi*0.5*mu_0*mu_1*sin(4*pi*xx_1_c[i])*cos(2*pi*xx_2_stag[i])*cos(4*pi*xx_2_stag[i])*sin(t)"
           "-8*pi*pi*0.5*mu_0*mu_1*sin(4*pi*xx_1_c[i])*sin(t)*(-sin(2*pi*xx_2_stag[i])*sin(4*pi*xx_2_stag[i])"
           "+2*cos(2*pi*xx_2_stag[i])*cos(4*pi*xx_2_stag[i]))",
        "calc_f_gpu",
        preamble="""
	#define pi (double)3.141592653589793
        #pragma OPENCL EXTENSION cl_khr_fp64 : enable
        """)

# Zero a real field in place.
zero_gpu = ElementwiseKernel(ctx,
	"double *x",
	"x[i] = 0",
	"zero_gpu",
	preamble="""
        #pragma OPENCL EXTENSION cl_khr_fp64 : enable
	""")
 
# Keep the real part and zero the imaginary part of a complex field.
null_imag_gpu = ElementwiseKernel(ctx,
	"double2 *x, double2 *z",
	"z[i].x = x[i].x;"
	"z[i].y = 0",
	"null_imag_gpu",
	preamble="""
        #pragma OPENCL EXTENSION cl_khr_fp64 : enable
	""")

# Complex division p_tf = w_tf / L_tf (used to invert the Laplacian symbol).
# NOTE(review): complex_div here computes a/conj-style division consistent
# with the definition in solve_prg below.
calc_p_gpu = ElementwiseKernel(ctx,
	"double2 *w_tf, double2 *L_tf, double2 *p_tf",
	"p_tf[i] = complex_div(w_tf[i], L_tf[i])",
	"calc_p_gpu",
	preamble="""
        #pragma OPENCL EXTENSION cl_khr_fp64 : enable
	#define complex_ctr(x, y) (double2)(x, y)
	#define complex_div(a, b) 1/((b).x*(b).x+(b).y*(b).y)*complex_ctr((a).y*(b).y+(a).x*(b).x,(a).y*(b).x-(a).x*(b).y)
	""")

# NOTE(review): dead code -- permanently disabled by `if False`; an older
# elementwise version of the Helmholtz solver, superseded by solve_prg.solve
# below.  Kept for reference only.
if False:
    solve_gpu = ElementwiseKernel(ctx,
    "double2 *u_1_tf, double2 *u_2_tf, double2 *L_tf,"
    "double2 *f_1_tf, double2 *f_2_tf, double2 *dp_1_tf,"
    "double2 *dp_2_tf, double dt, double rho, double mu,"
    "double2 *u_1_tf_tilde, double2 *u_2_tf_tilde",
    "double2 rhs_1 = complex_mul(1+mu*dt/(2*rho)*L_tf[i], u_1_tf[i])+dt/rho*(f_1_tf[i]-dp_1_tf[i]);"
    "double2 rhs_2 = complex_mul(1+mu*dt/(2*rho)*L_tf[i], u_2_tf[i])+dt/rho*(f_2_tf[i]-dp_2_tf[i]);"
    "double2 lhs = 1-mu*dt/(2*rho)*L_tf[i];"
    "u_1_tf_tilde[i] = complex_div(rhs_1, lhs);"
    "u_2_tf_tilde[i] = complex_div(rhs_2, lhs);",
    "solve_gpu",
    preamble="""
    #define complex_ctr(x, y) (double2)(x, y)
    #define complex_mul(a, b) complex_ctr(mad(-(a).y, (b).y, (a).x * (b).x), mad((a).y, (b).x, (a).x * (b).y))
    #define complex_div(a, b) 1/((b).x*(b).x+(b).y*(b).y)*complex_ctr((a).y*(b).y+(a).x*(b).x,(a).y*(b).x-(a).x*(b).y)
    """)

# OpenCL program with the core solver kernels:
#  - solve:      Fourier-space Helmholtz solve (1 + mu_1*dt/rho*L) u / (1 - mu_2*dt/rho*L)
#  - dal_term:   density * (time-difference + convection) momentum term
#  - vc_laplace: variable-coefficient (viscosity) Laplacian on the staggered
#                grid with periodic wraparound via the `& N-1` index masks
#                (assumes N is a power of two).
solve_prg = cl.Program(ctx, """
    //#pragma OPENCL EXTENSION cl_amd_printf : enable
    #pragma OPENCL EXTENSION cl_khr_fp64 : enable
    //#pragma OPENCL EXTENSION cl_amd_fp64 : enable
    #define complex_ctr(x, y) (double2)(x, y)
    #define complex_mul(a, b) complex_ctr(-(a).y*(b).y+(a).x*(b).x, (a).y*(b).x+(a).x*(b).y)
    #define complex_div(a, b) 1/((b).x*(b).x+(b).y*(b).y)*complex_ctr((a).y*(b).y+(a).x*(b).x,(a).y*(b).x-(a).x*(b).y)
    __kernel void solve(__global const double2 *u_1_tf, __global const double2 *u_2_tf, __global const double2 *L_tf,
    __global const double2 *f_1_tf, __global const double2 *f_2_tf,
    const double dt, const double rho, const double mu_1, const double mu_2, const unsigned N,
    __global double2 *u_1_tf_tilde, __global double2 *u_2_tf_tilde)
    {
      unsigned i = get_global_id(0);
      unsigned j = get_global_id(1);
      unsigned base = i + N*j;
      double2 m;
      m.x = 1+mu_1*dt/rho*L_tf[base].x; 
      m.y = mu_1*dt/rho*L_tf[base].y; 
      //m.x = 1.0; 
      //m.y = 0.0; 
      double2 rhs_1 = complex_mul(m, u_1_tf[base])+dt/rho*f_1_tf[base];
      double2 rhs_2 = complex_mul(m, u_2_tf[base])+dt/rho*f_2_tf[base];
      double2 lhs;
      lhs.x = 1-mu_2*dt/rho*L_tf[base].x;
      lhs.y = -mu_2*dt/rho*L_tf[base].y;
      //lhs.x = 1.0-mu*dt/rho*L_tf[base].x;
      //lhs.y = -mu*dt/rho*L_tf[base].y;
      u_1_tf_tilde[base] = complex_div(rhs_1, lhs);
      u_2_tf_tilde[base] = complex_div(rhs_2, lhs);
    }
    __kernel void dal_term(__global const double *u_1, __global const double *u_2, __global const double *u_1_o, __global const double *u_2_o, __global const double *ss_1, __global const double *ss_2, __global const double *rho_1, __global const double *rho_2,
    const double dt, const unsigned N,
    __global double *f_1, __global double *f_2)
    {
      unsigned i = get_global_id(0);
      unsigned j = get_global_id(1);
      unsigned base = i + N*j;
      f_1[base] = rho_1[base]*((u_1[base]-u_1_o[base])/dt+ss_1[base]);
      f_2[base] = rho_2[base]*((u_2[base]-u_2_o[base])/dt+ss_2[base]);
    }
    __kernel void vc_laplace(__global const double *u_1,
    __global const double *u_2,
    __global const double *mu_1,
    __global const double *mu_2, const double dx, const unsigned N,
    __global double *V_1, __global double *V_2)
    {
    unsigned i = get_global_id(0);
    unsigned j = get_global_id(1);
    unsigned base = i + N*j;
    unsigned base1 = (i+1 & N-1) + N*j;
    unsigned base2 = (i-1 & N-1) + N*j;
    unsigned base3 = i + N*(j+1 & N-1);
    unsigned base4 = i + N*(j-1 & N-1);
    unsigned base5 = (i-1 & N-1) + N*(j+1 & N-1);
    unsigned base6 = (i+1 & N-1) + N*(j-1 & N-1);

    V_1[base] = 1.0/(dx*dx)*(2.0*(0.5*(mu_1[base]+mu_1[base1])*(u_1[base1]-u_1[base])
               -0.5*(mu_1[base]+mu_1[base2])*(u_1[base]-u_1[base2]))
               +0.5*(mu_1[base3]+mu_1[base])*(u_1[base3]-u_1[base])
               -0.5*(mu_1[base]+mu_1[base4])*(u_1[base]-u_1[base4]) 
               +0.5*(mu_1[base3]+mu_1[base])*(u_2[base3]-u_2[base5])
               -0.5*(mu_1[base]+mu_1[base4])*(u_2[base]-u_2[base2]));

    V_2[base] = 1.0/(dx*dx)*(0.5*(mu_2[base]+mu_2[base1])*(u_2[base1]-u_2[base])
               -0.5*(mu_2[base]+mu_2[base2])*(u_2[base]-u_2[base2])
               +0.5*(mu_2[base]+mu_2[base1])*(u_1[base1]-u_1[base6])
               -0.5*(mu_2[base]+mu_2[base2])*(u_1[base]-u_1[base4])
               +2*(0.5*(mu_2[base]+mu_2[base3])*(u_2[base3]-u_2[base])
               -0.5*(mu_2[base]+mu_2[base4])*(u_2[base]-u_2[base4])));

    }   
    """).build()

# OpenCL program with grid-operator kernels (periodic wraparound via the
# `& N-1` masks, so N must be a power of two):
#  - p_filt / L_adj_1 / L_adj_2: fix the zero (mean) Fourier mode
#  - nyq_filt: zero the Nyquist row and column of a complex field
#  - vort / div / grad: first-difference vorticity, divergence and gradient
#  - ss_conv: skew-symmetric convection 0.5*(div(UU) + (U.grad)U)
#  - interp*: interpolation between cell centers and staggered face points
prg = cl.Program(ctx, """
    #pragma OPENCL EXTENSION cl_khr_fp64 : enable
    //#pragma OPENCL EXTENSION cl_amd_fp64 : enable
    __kernel void p_filt(__global double2 *u)
    {
      u[0].x = 0;
      u[0].y = 0;
    }
    __kernel void L_adj_1(__global double2 *L_tf)
    {
      L_tf[0].x = 0.0;
      L_tf[0].y = 0.0;
    }
    __kernel void L_adj_2(__global double2 *L_tf)
    {
      L_tf[0].x = 1.0;
      L_tf[0].y = 0.0;
    }
    __kernel void nyq_filt(__global double2 *u, const unsigned N)
    {
      unsigned i = get_global_id(0);
      unsigned base = N/2 + N*i; 
      unsigned base1 = i + N*N/2; 
   
      u[base].x = 0;
      u[base].y = 0;
      u[base1].x = 0;
      u[base1].y = 0;
    }
    __kernel void vort(__global const double *u_1,
    __global const double *u_2,
    const double dx, const unsigned N, __global double *f)
    {
      unsigned i = get_global_id(0);
      unsigned j = get_global_id(1);
      unsigned base = i + N*j;
      unsigned base1 = (i-1 & N-1)+N*j;
      unsigned base2 = i+N*(j-1 & N-1);
      f[base] = 1/dx*(u_2[base]-u_2[base1]-(u_1[base]-u_1[base2]));
    }
    __kernel void div(__global const double *u_1,
    __global const double *u_2,
    const double dx, const unsigned N, __global double *f)
    {
      unsigned i = get_global_id(0);
      unsigned j = get_global_id(1);
      //f[i,j] = 1/dx*(u_1[i_2[i,j],ip_1[i,j]]-u_1[i,j]+u_2[ip_2[i,j],i_1[i,j]]-u_2[i,j];
      unsigned base = i + N*j;
      unsigned base1 = (i+1 & N-1)+N*j;
      unsigned base2 = i+N*(j+1 & N-1);
      f[base] = 1/dx*(u_1[base1]-u_1[base]+u_2[base2]-u_2[base]);
    }
    __kernel void grad(__global const double *phi,
    const double dx, const unsigned N,
    __global double *g_1, __global double *g_2)
    {
      unsigned i = get_global_id(0);
      unsigned j = get_global_id(1);
      //grad_1 = 1/dx*(phi[i_2,i_1]-phi[i_2,im_1])
      //grad_2 = 1/dx*(phi[i_2,i_1]-phi[im_2,i_1])
      unsigned base = i+N*j;
      unsigned base1 = (i-1 & N-1)+N*j;
      unsigned base2 = i+N*(j-1 & N-1);
      g_1[base] = 1/dx*(phi[base]-phi[base1]);
      g_2[base] = 1/dx*(phi[base]-phi[base2]);
    }
    __kernel void ss_conv(__global const double *u_1,
    __global const double *u_2, const double dx, const unsigned N,
    __global double *S_1, __global double *S_2)
    {
      unsigned i = get_global_id(0);
      unsigned j = get_global_id(1);
      //Compute S = 0.5*(div(UU) + (U*grad)U).
      unsigned base = i+N*j;
      unsigned base1 = (i-1 & N-1)+N*j;
      unsigned base2 = (i+1 & N-1)+N*j;
      unsigned base3 = i+N*(j-1 & N-1);
      unsigned base4 = i+N*(j+1 & N-1);
      unsigned base5 = (i-1 & N-1)+N*(j+1 & N-1);
      unsigned base6 = (i+1 & N-1)+N*(j-1 & N-1);
      double D_x, D_y, A_x, A_y, D, A;
      D_x = (0.25/dx)*(
          (u_1[base2]+u_1[base])*(u_1[base2]+u_1[base]) -
          (u_1[base]+u_1[base1])*(u_1[base]+u_1[base1]));
      D_y = (0.25/dx)*(
          (u_2[base4]+u_2[base5])*(u_1[base4]+u_1[base]) -
          (u_2[base]+u_2[base1])*(u_1[base]+u_1[base3]));
      D = D_x + D_y;
      A_x =  (0.25/dx)*(
          (u_1[base2]+u_1[base])*(u_1[base2]-u_1[base]) +
          (u_1[base]+u_1[base1])*(u_1[base]-u_1[base1]));
      A_y = (0.25/dx)*(
            (u_2[base4]+u_2[base5])*(u_1[base4]-u_1[base]) +
            (u_2[base]+u_2[base1])*(u_1[base]-u_1[base3]));
      A = A_x + A_y;
      S_1[base] = 0.5*(D+A);
      D_x = (0.25/dx)*(
          (u_1[base2]+u_1[base6])*(u_2[base2]+u_2[base]) -
          (u_1[base]+u_1[base3])*(u_2[base]+u_2[base1]));
      D_y = (0.25/dx)*(
          (u_2[base4]+u_2[base])*(u_2[base4]+u_2[base]) -
          (u_2[base]+u_2[base3])*(u_2[base]+u_2[base3]));
      D = D_x + D_y;
      A_x =  (0.25/dx)*(
          (u_1[base2]+u_1[base6])*(u_2[base2]-u_2[base]) +
          (u_1[base]+u_1[base3])*(u_2[base]-u_2[base1]));
      A_y = (0.25/dx)*(
            (u_2[base4]+u_2[base])*(u_2[base4]-u_2[base]) +
            (u_2[base]+u_2[base3])*(u_2[base]-u_2[base3]));
      A = A_x + A_y;
      S_2[base] = 0.5*(D+A);
    }
    __kernel void interpctolr(__global const double *phi, __global const unsigned *i_2,  __global const unsigned *im_1, const unsigned N, __global double *f)
    {
      unsigned i = get_global_id(0);
      unsigned j = get_global_id(1);
      //phi_itp = 0.5*(phi+phi[i_2,im_1])

      unsigned base = i+N*j;
      unsigned base1 = im_1[base]+N*i_2[base];
      f[base] = 0.5*(phi[base]+phi[base1]);
    }
    __kernel void interpctotb(__global const double *phi, __global const unsigned *i_1, __global const unsigned *im_2, const unsigned N, __global double *f)
    {
      unsigned i = get_global_id(0);
      unsigned j = get_global_id(1);
      //phi_itp = 0.5*(phi+phi[im_2,i_1])

      unsigned base = i+N*j;
      unsigned base1 = i_1[base]+N*im_2[base];
      f[base] = 0.5*(phi[base]+phi[base1]);
    }
    __kernel void interplrtoc(__global const double *phi, __global const unsigned *i_2, __global const unsigned *ip_1, const unsigned N, __global double *f)
    {
      unsigned i = get_global_id(0);
      unsigned j = get_global_id(1);
      //phi_itp = 0.5*(phi+phi[i_2,ip_1])

      unsigned base = i+N*j;
      unsigned base1 = ip_1[base]+N*i_2[base];
      f[base] = 0.5*(phi[base]+phi[base1]);
    }
    __kernel void interptbtoc(__global const double *phi, __global const unsigned *i_1,__global const unsigned *ip_2, const unsigned N, __global double *f)
    {
      unsigned i = get_global_id(0);
      unsigned j = get_global_id(1);
      //phi_itp = 0.5*(phi+phi[ip_2,i_1])

      unsigned base = i+N*j;
      unsigned base1 = i_1[base]+N*ip_2[base];
      f[base] = 0.5*(phi[base]+phi[base1]);
    }
    """).build()

# --- Bind kernel handles and declare scalar argument dtypes ---
# set_scalar_arg_dtypes lets pyopencl convert plain Python scalars at call
# time; None entries are buffer arguments.
div_kernel = prg.div
div_kernel.set_scalar_arg_dtypes([None]*2 + [np.float64, np.uint32] + [None])

vort_kernel = prg.vort
vort_kernel.set_scalar_arg_dtypes([None]*2 + [np.float64, np.uint32] + [None])

L_adj_1_kernel = prg.L_adj_1
L_adj_1_kernel.set_scalar_arg_dtypes([None])

L_adj_2_kernel = prg.L_adj_2
L_adj_2_kernel.set_scalar_arg_dtypes([None])

p_filt_kernel = prg.p_filt
p_filt_kernel.set_scalar_arg_dtypes([None])

nyq_filt_kernel = prg.nyq_filt
nyq_filt_kernel.set_scalar_arg_dtypes([None] + [np.uint32])

solve_kernel = solve_prg.solve
solve_kernel.set_scalar_arg_dtypes([None]*5 + [np.float64]*4 + [np.uint32] + [None]*2)

vc_laplace_kernel = solve_prg.vc_laplace
vc_laplace_kernel.set_scalar_arg_dtypes([None]*4 + [np.float64] + [np.uint32] + [None]*2)

dal_term_kernel = solve_prg.dal_term
dal_term_kernel.set_scalar_arg_dtypes([None]*8 + [np.float64] + [np.uint32] + [None]*2)

grad_kernel = prg.grad
grad_kernel.set_scalar_arg_dtypes([None]*1 + [np.float64, np.uint32] + [None]*2)

ss_conv_kernel = prg.ss_conv
ss_conv_kernel.set_scalar_arg_dtypes([None]*2 + [np.float64, np.uint32] + [None]*2)

# Interpolation kernels between cell-centered and staggered locations.
interpctolr_kernel = prg.interpctolr
interpctolr_kernel.set_scalar_arg_dtypes([None]*3 + [np.uint32] + [None])
interpctotb_kernel = prg.interpctotb
interpctotb_kernel.set_scalar_arg_dtypes([None]*3 + [np.uint32] + [None])
interplrtoc_kernel = prg.interplrtoc
interplrtoc_kernel.set_scalar_arg_dtypes([None]*3 + [np.uint32] + [None])
interptbtoc_kernel = prg.interptbtoc
interptbtoc_kernel.set_scalar_arg_dtypes([None]*3 + [np.uint32] + [None])

for ii in range(start_size,end_size+1): 
    #set up grid
    N = 2**ii
    dx = 1.0/N
    rho_0 = 1.0
    rho_1 = 1.0
    mu_0 = 0.01 
    mu_1 = 1.0

    n_iter = 10

    x_1_stag = np.arange(-0.5, 0.5, dx)
    x_2_stag = np.arange(-0.5, 0.5, dx)
    xx_1_stag, xx_2_stag = np.meshgrid(x_1_stag, x_2_stag)

    x_1_c = np.arange(-0.5+dx/2, 0.5+dx/2, dx)
    x_2_c = np.arange(-0.5+dx/2, 0.5+dx/2, dx)
    xx_1_c, xx_2_c = np.meshgrid(x_1_c, x_2_c)
    
    if PR_INT:
        mesh3 = np.vstack((xx_1_c[np.newaxis],xx_2_c[np.newaxis],np.zeros((N,N))[np.newaxis]))

    rho_tilde_1 = rho_0*rho_1*0.5*(1.0+np.sin(2*pi*xx_1_stag)*np.sin(2*pi*xx_2_c))
    rho_tilde_2 = rho_0*rho_1*0.5*(1.0+np.sin(2*pi*xx_1_c)*np.sin(2*pi*xx_2_stag))

    mu_tilde_1 = mu_0*mu_1*0.5*(1.0+np.cos(2*pi*xx_1_stag)*np.cos(2*pi*xx_2_c))
    mu_tilde_2 = mu_0*mu_1*0.5*(1.0+np.cos(2*pi*xx_1_c)*np.cos(2*pi*xx_2_stag))

    #mu_bar = 0.0*mu_0 
    #rho_bar = 10.0*rho_0 
    mu_min = mu_0
    mu_max = mu_0 + mu_0*mu_1
    mu_hat = mu_max
    #mu_hat = 0.5*(mu_min+mu_max)
    rho_min = rho_0
    rho_max = rho_0 + rho_0*rho_1
    rho_hat = rho_max
    #rho_hat = 0.5*(rho_min+rho_max)
    rho_1_tr = rho_tilde_1.copy()
    rho_2_tr = rho_tilde_2.copy()
    #mu_0 = mu_0+mu_bar
    #rho_0 = rho_0+rho_bar
    #rho_tilde_1 = rho_tilde_1 - rho_bar
    #rho_tilde_2 = rho_tilde_2 - rho_bar
    #mu_tilde_1 = mu_tilde_1 - mu_bar
    #mu_tilde_2 = mu_tilde_2 - mu_bar

    dt = 0.5*dx
    clock_max = np.int(np.round(1/dt,0))
    #clock_max = 100
    print 'grid size', N
    print 'total timesteps', clock_max

    #set up Lagrangian parameters
    #N_r = N/2
    #N_s = N/2
    N_r = 3*N/8
    N_s = 75*N/16
    b_q = N_r*N_s
    dr = 1./N_r
    ds = 1./N_s
    dtheta = dr*ds
    #a_lag = 0.25
    #b_lag = 0.2
    a_lag = 0.2
    b_lag = 0.25
    #a_lag = 0.225 #start in circle
    #b_lag = 0.225
    g_lag = 0.0625

    #set up Lagrangian grid
    r_lag = np.arange(dr/2, 1, dr)
    s_lag = np.arange(ds/2, 1, ds)
    rr_lag, ss_lag = np.meshgrid(r_lag, s_lag)
    rr_lag = rr_lag.reshape(b_q,1)
    ss_lag = ss_lag.reshape(b_q,1)

    i_lag = np.arange(0, b_q)
    ip_s_lag = np.remainder(i_lag+N_r, b_q)
    im_s_lag = np.remainder(i_lag-N_r, b_q)

    L_tf = calc_transf(dx)
    i_1, i_2, ip_1, ip_2, im_1, im_2 = calc_inds(N)

    #u_1_soln = 1.0+np.cos(2*pi*xx_1_stag)*np.sin(4*pi*xx_2_c)
    #u_2_soln = -0.5*np.sin(2*pi*xx_1_c)*np.cos(4*pi*xx_2_stag)
    #u_1_soln = 1.0+1.0*np.random.rand(N,N)
    #u_2_soln = 1.0*np.random.rand(N,N)
    u_1_soln = np.zeros((N,N)) #initial conditions
    u_2_soln = np.zeros((N,N))
    #u_1_soln = np.sin(2*pi*xx_2_c) #shear field
    #u_2_soln = np.zeros((N,N))

    #move data to gpu
    start_time = time.time()
    plan = Plan((N, N), queue=queue, dtype=np.complex128, fast_math=False)
    print 'time spent making plan is', time.time()-start_time, 'seconds'

    u_1_gpu = cl_array.to_device(queue, u_1_soln.astype(np.float64))
    u_2_gpu = cl_array.to_device(queue, u_2_soln.astype(np.float64))
    u_1_o_gpu = cl_array.to_device(queue, u_1_soln.astype(np.float64))
    u_2_o_gpu = cl_array.to_device(queue, u_2_soln.astype(np.float64))
    u_1_tf_gpu = cl_array.to_device(queue, u_1_soln.astype(np.complex128))
    u_2_tf_gpu = cl_array.to_device(queue, u_2_soln.astype(np.complex128))
    p_gpu = cl_array.empty_like(u_1_gpu) 
    p_tf_gpu = cl_array.empty_like(u_1_tf_gpu) 
    dp_1_gpu = cl_array.empty_like(u_1_gpu) 
    dp_2_gpu = cl_array.empty_like(u_1_gpu) 
    L_tf_gpu = cl_array.to_device(queue, L_tf.astype(np.complex128))
    f_1_gpu = cl_array.empty_like(u_1_gpu) 
    f_2_gpu = cl_array.empty_like(u_1_gpu) 
    f_1_tf_gpu = cl_array.empty_like(u_1_tf_gpu) 
    f_2_tf_gpu = cl_array.empty_like(u_1_tf_gpu) 
    mu_1_gpu = cl_array.empty_like(u_1_gpu) 
    mu_2_gpu = cl_array.empty_like(u_1_gpu) 
    if SAVE:
        rho_1_save_gpu = cl_array.empty_like(u_1_gpu) 
        rho_2_save_gpu = cl_array.empty_like(u_1_gpu) 
        mu_1_save_gpu = cl_array.empty_like(u_1_gpu) 
        mu_2_save_gpu = cl_array.empty_like(u_1_gpu) 
    if PR_INT:
        mu_1_plot_gpu = cl_array.empty_like(u_1_gpu) 
        mu_2_plot_gpu = cl_array.empty_like(u_1_gpu) 
        vort_gpu = cl_array.empty_like(u_1_gpu) 
    rho_1_gpu = cl_array.empty_like(u_1_gpu) 
    rho_2_gpu = cl_array.empty_like(u_1_gpu) 
    u_1_tilde_gpu = cl_array.empty_like(u_1_gpu) 
    u_2_tilde_gpu = cl_array.empty_like(u_1_gpu) 
    xx_1_stag_gpu = cl_array.to_device(ctx, queue, xx_1_stag.astype(np.float64))
    xx_2_stag_gpu = cl_array.to_device(ctx, queue, xx_2_stag.astype(np.float64))
    xx_1_c_gpu = cl_array.to_device(ctx, queue, xx_1_c.astype(np.float64))
    xx_2_c_gpu = cl_array.to_device(ctx, queue, xx_2_c.astype(np.float64))
    mu_1_gpu = cl_array.to_device(queue, mu_tilde_1.astype(np.float64)) 
    mu_2_gpu = cl_array.to_device(queue, mu_tilde_2.astype(np.float64)) 
    rho_1_gpu = cl_array.to_device(queue, rho_tilde_1.astype(np.float64)) 
    rho_2_gpu = cl_array.to_device(queue, rho_tilde_2.astype(np.float64)) 
    rho_1_tr_gpu = cl_array.to_device(queue, rho_1_tr.astype(np.float64)) 
    rho_2_tr_gpu = cl_array.to_device(queue, rho_2_tr.astype(np.float64)) 
    mu_1_plot_gpu = cl_array.empty_like(u_1_gpu) 
    mu_2_plot_gpu = cl_array.empty_like(u_1_gpu) 

    #solve
    queue.finish()
    start_time = time.time()
    #norm_o = 10**10
    ##normv_o = 10**10
    for t_step in range(1, clock_max+1):
	solve(t_step)
        if (t_step-1) % DISP_INT == 0:
            print t_step
        #    print 'u_1 max is', u_1_gpu.get().max()
        #    print 'u_2 max is', u_2_gpu.get().max()
        #grad_kernel(queue, (N,N), (WGPSIZE_1, WGPSIZE_2), u_1_gpu.data, dx, N, f_1_gpu.data, f_2_gpu.data)
        #D11 = np.sum(f_1_gpu.get()*f_1_gpu.get())
        #D12 = np.sum(f_2_gpu.get()*f_2_gpu.get())
        #grad_kernel(queue, (N,N), (WGPSIZE_1, WGPSIZE_2), u_2_gpu.data, dx, N, f_1_gpu.data, f_2_gpu.data)
        #D21 = np.sum(f_1_gpu.get()*f_1_gpu.get())
        #D22 = np.sum(f_2_gpu.get()*f_2_gpu.get())
        ##print 'norm is', D11+D12+D21+D22+np.sum(u_1_gpu.get()*u_1_gpu.get())+np.sum(u_2_gpu.get()*u_2_gpu.get())
        #normu = dx**2*(dt*mu_0*(D11+D12)+np.sum(u_1_gpu.get()*u_1_gpu.get()))
        #normv = dx**2*(dt*mu_0*(D21+D22)+np.sum(u_2_gpu.get()*u_2_gpu.get()))
        #norm = normu+normv
        ##norm = dx**2*(np.sum(u_1_gpu.get()*u_1_gpu.get())+np.sum(u_2_gpu.get()*u_2_gpu.get()))
        if WRITE_NORM:
            print 'norm is', norm
            f.write('%g \n' % np.sqrt(norm))
            ##f2.write('%g \n' % normv)
            if (norm - norm_o > 0):
                if (np.abs(norm - norm_o) > 10**-12):
                    print norm
                    print norm_o
                    print 'norm increasing'
                    print t_step
                    #sys.exit()
        ##if (normv - normv_o > 0):
        ##    if (np.abs(normv - normv_o) > 10**-12):
        ##        print normv
        ##        print normv_o
        ##        print 'normv increasing'
        ##        print t_step
        ##        #sys.exit()
        #norm_o = norm
        ##normv_o = normv
    u_1_soln = u_1_gpu.get()
    u_2_soln = u_2_gpu.get()
    if ii != start_size: 
        errinf1_t, errinf2_t, err21_t, err22_t, errinfp_t, err2p_t = err1(clock_max, u_1_soln, u_2_soln, p_gpu.get())
        print 'True Errinf1 is', errinf1_t, '\n True Errinf2 is', errinf2_t
        print 'True Err21 is', err21_t, '\n True Err22 is', err22_t
        print 'True p Errinf is', errinfp_t, '\n True p Err2 is', err2p_t

    queue.finish()
    print 'time spent is', time.time()-start_time, 'seconds'

# Close the norm log file opened at the top of the file (only exists when
# WRITE_NORM was enabled).
if WRITE_NORM:
    f.close()
#f2.close()
