{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load Packages"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2016-10-26T07:55:27.312756",
     "start_time": "2016-10-26T07:55:26.714692"
    },
    "collapsed": false
   },
   "outputs": [],
   "source": [
     "%pylab inline\n",
     "# NOTE(review): %pylab star-imports numpy and matplotlib.pyplot names\n",
     "# (array, tile, sqrt, eye, randn, imshow, show, gcf) that later cells rely on.\n",
     "import scipy\n",
     "import h5py\n",
     "import skimage\n",
     "import os\n",
     "from skimage import io,transform,img_as_float\n",
     "from skimage.io import imread,imsave\n",
     "from collections import OrderedDict\n",
     "import decimal\n",
     "# Directory layout: the notebook lives one level below the project root.\n",
     "notebook_dir = os.getcwd()\n",
     "project_dir = os.path.split(notebook_dir)[0]\n",
     "result_dir = project_dir + '/Results/Images/'\n",
     "if not os.path.isdir(result_dir):\n",
     "    os.makedirs(result_dir)\n",
     "tmp_dir = project_dir + '/Tmp/'\n",
     "if not os.path.isdir(tmp_dir):\n",
     "    os.makedirs(tmp_dir)\n",
     "# Content and style images are read from the same folder in this notebook.\n",
     "photo_dir = project_dir + '/Images/ControlPaper/'\n",
     "art_dir = project_dir + '/Images/ControlPaper/'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Define Python helper functions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2016-10-26T07:55:28.025319",
     "start_time": "2016-10-26T07:55:27.711666"
    },
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "def make_torch_input(filename, layers, loss_functions, args):\n",
    "    f = h5py.File(filename,'w')\n",
    "    for l,layer in enumerate(layers):\n",
    "        layer_group = f.create_group(layer)\n",
    "        for lf,loss_function in enumerate(loss_functions[l]):\n",
    "            lf_group = layer_group.create_group(loss_function)\n",
    "            for arg in args[l][lf]:\n",
    "                dataset = lf_group.create_dataset(arg, data=args[l][lf][arg])\n",
    "    f.close()\n",
    "    \n",
    "def make_torch_init(filename, init):\n",
    "    f = h5py.File(filename,'w')\n",
    "    f.create_dataset('init', data=init)\n",
    "    f.close()\n",
    "\n",
    "def get_torch_output(filename):\n",
    "    f = h5py.File(filename,'r')\n",
    "    data = f['opt_result']\n",
    "    return data.value\n",
    "    f.close()\n",
    "def get_torch_loss(filename):\n",
    "    f = h5py.File(filename,'r')\n",
    "    data = f['losses']\n",
    "    return data.value\n",
    "    f.close()\n",
    "\n",
    "def list2css(layers):\n",
    "    '''\n",
    "    Takes list of strings and returns comma separated string\n",
    "    '''\n",
    "    css = str()\n",
    "    for l in layers:\n",
    "        css = css+l+','\n",
    "    return css[:-1]\n",
    "\n",
     "def get_activations(images, caffe_model, layers='all', gpu=0):\n",
     "    '''\n",
     "    Function to get neural network activations in response to images from torch.\n",
     "    \n",
     "    :param images: array of images\n",
     "    :param caffe_model: file name of the network .caffemodel file\n",
     "    :param layers: network layers for which the activations should be computed\n",
     "    :return: network activations in response to images\n",
     "    '''\n",
     "    # NOTE(review): the default layers='all' is a plain string; list2css\n",
     "    # would iterate its characters ('a,l,l'). Callers here always pass a\n",
     "    # list -- confirm before relying on the default.\n",
     "    layers = list2css(layers)\n",
     "    images_file_name = tmp_dir + 'images.hdf5'\n",
     "    output_file_name = tmp_dir + 'activations.hdf5'\n",
     "    # Hand the images to the torch process via a temporary HDF5 file.\n",
     "    f = h5py.File(images_file_name, 'w')\n",
     "    f.create_dataset('images', data=images)\n",
     "    f.close()\n",
     "    context = {\n",
     "    'caffe_model': caffe_model,\n",
     "    'images': images_file_name,\n",
     "    'layers': layers,\n",
     "    'gpu': gpu,\n",
     "    'backend': 'cudnn',\n",
     "    'output_file': output_file_name,\n",
     "    'project_dir': project_dir\n",
     "    }\n",
     "    # Build a one-off shell script that runs the torch activation dump.\n",
     "    template = ('#!/bin/bash\\n' +\n",
     "                'cd {project_dir} && ' + \n",
     "                '/usr/local/torch/install/bin/th ComputeActivations.lua ' + \n",
     "                '-caffe_model {caffe_model} ' +\n",
     "                '-images {images} ' + \n",
     "                '-layers {layers} ' + \n",
     "                '-gpu {gpu} ' + \n",
     "                '-backend {backend} ' +\n",
     "                '-output_file {output_file}')\n",
     "    script_name = project_dir + '/get_activations.sh'\n",
     "    with open(script_name, 'w') as script:\n",
     "        script.write(template.format(**context))\n",
     "    os.chmod(script_name, 0o755)\n",
     "    #execute script \n",
     "    !{script_name} >/dev/null\n",
     "    # Collect the per-layer activations written by the torch script,\n",
     "    # copying each array so it stays valid after the file is closed.\n",
     "    f = h5py.File(output_file_name,'r')\n",
     "    act = OrderedDict()\n",
     "    for key in f.keys():\n",
     "        act[key] = f[key].value.copy()\n",
     "    f.close()\n",
     "    return act\n",
    "\n",
     "def preprocess(image):\n",
     "    # Convert a (h,w,3) float image in [0,1] to the network input format:\n",
     "    # reverse the channel order, subtract the per-channel means, scale to\n",
     "    # [0,255] and move channels first -> (3,h,w).\n",
     "    assert(image.max() <= 1.001)\n",
     "    imagenet_mean = array([0.40760392,  0.45795686,  0.48501961])\n",
     "    image_torch = 255 * (image[:,:,::-1] - imagenet_mean).transpose(2,0,1)\n",
     "    return image_torch\n",
    "\n",
     "def deprocess(image_torch):\n",
     "    # Inverse of preprocess: back to (h,w,3), add the means, restore the\n",
     "    # original channel order and clip to the valid [0,1] range.\n",
     "    imagenet_mean = array([0.40760392,  0.45795686,  0.48501961])\n",
     "    image = (image_torch.transpose(1,2,0)/255. + imagenet_mean)[:,:,::-1]\n",
     "    image[image>1] = 1\n",
     "    image[image<0] = 0\n",
     "    return image\n",
    "\n",
     "def gram_matrix(activations):\n",
     "    # Gram matrix of a (feature_maps, ...) activation tensor, flattened to\n",
     "    # 2-D and normalised by the number of positions per feature map.\n",
     "    n_fm = activations.shape[0]\n",
     "    F = activations.reshape(n_fm,-1)\n",
     "    G = F.dot(F.T) / F[0,:].size\n",
     "    return G\n",
    "\n",
     "import itertools\n",
     "def flatten(l):\n",
     "    # Flatten one level of nesting: [[a,b],[c]] -> [a,b,c].\n",
     "    return list(itertools.chain.from_iterable(l))\n",
    "\n",
     "def set_model(name, project_dir):\n",
     "    # Map a short model name to the corresponding .caffemodel path.\n",
     "    if name == 'org_pad':\n",
     "        model = project_dir + '/Models/VGG_ILSVRC_19_layers_conv.caffemodel'\n",
     "    elif name == 'norm_pad':\n",
     "        model = project_dir + '/Models/vgg_normalised.caffemodel'\n",
     "    else:\n",
     "        assert False, 'unknown model name'\n",
     "    return model\n",
    "\n",
    "def match_color(target_img, source_img, mode='sym', eps=1e-5):\n",
    "    '''\n",
    "    Matches the colour distribution of the target image to that of the source image\n",
    "    using a linear transform.\n",
    "    Images are expected to be of form (w,h,c).\n",
    "    Modes are chol, pca or sym for different choices of basis.\n",
    "    '''\n",
    "    mu_t = target_img.mean(0).mean(0)\n",
    "    t = target_img - mu_t\n",
    "    t = t.transpose(2,0,1).reshape(3,-1)\n",
    "    Ct = t.dot(t.T) / t.shape[1] + eps * eye(t.shape[0])\n",
    "    mu_s = source_img.mean(0).mean(0)\n",
    "    s = source_img - mu_s\n",
    "    s = s.transpose(2,0,1).reshape(3,-1)\n",
    "    Cs = s.dot(s.T) / s.shape[1] + eps * eye(s.shape[0])\n",
    "    if mode == 'chol':\n",
    "        chol_t = np.linalg.cholesky(Ct)\n",
    "        chol_s = np.linalg.cholesky(Cs)\n",
    "        ts = chol_s.dot(np.linalg.inv(chol_t)).dot(t)\n",
    "    if mode == 'pca':\n",
    "        eva_t, eve_t = np.linalg.eigh(Ct)\n",
    "        Qt = eve_t.dot(np.sqrt(np.diag(eva_t))).dot(eve_t.T)\n",
    "        eva_s, eve_s = np.linalg.eigh(Cs)\n",
    "        Qs = eve_s.dot(np.sqrt(np.diag(eva_s))).dot(eve_s.T)\n",
    "        ts = Qs.dot(np.linalg.inv(Qt)).dot(t)\n",
    "    if mode == 'sym':\n",
    "        eva_t, eve_t = np.linalg.eigh(Ct)\n",
    "        Qt = eve_t.dot(np.sqrt(np.diag(eva_t))).dot(eve_t.T)\n",
    "        Qt_Cs_Qt = Qt.dot(Cs).dot(Qt)\n",
    "        eva_QtCsQt, eve_QtCsQt = np.linalg.eigh(Qt_Cs_Qt)\n",
    "        QtCsQt = eve_QtCsQt.dot(np.sqrt(np.diag(eva_QtCsQt))).dot(eve_QtCsQt.T)\n",
    "        ts = np.linalg.inv(Qt).dot(QtCsQt).dot(np.linalg.inv(Qt)).dot(t)\n",
    "    matched_img = ts.reshape(*target_img.transpose(2,0,1).shape).transpose(1,2,0)\n",
    "    matched_img += mu_s\n",
    "    matched_img[matched_img>1] = 1\n",
    "    matched_img[matched_img<0] = 0\n",
    "    return matched_img\n",
    "\n",
     "def lum_transform(image):\n",
     "    \"\"\"\n",
     "    Returns the projection of a colour image onto the luminance channel\n",
     "    Images are expected to be of form (w,h,c) and float in [0,1].\n",
     "    \"\"\"\n",
     "    img = image.transpose(2,0,1).reshape(3,-1)\n",
     "    # BT.601 luma weights; the single luminance channel is replicated into\n",
     "    # all three channels so the output shape matches the input.\n",
     "    lum = np.array([.299, .587, .114]).dot(img).squeeze()\n",
     "    img = tile(lum[None,:],(3,1)).reshape((3,image.shape[0],image.shape[1]))\n",
     "    return img.transpose(1,2,0)\n",
    "\n",
     "def rgb2luv(image):\n",
     "    # NOTE(review): despite the name, this matrix is the RGB->YUV (BT.601)\n",
     "    # transform, not CIELUV -- confirm intent; luv2rgb below applies the\n",
     "    # matching approximate inverse.\n",
     "    img = image.transpose(2,0,1).reshape(3,-1)\n",
     "    luv = np.array([[.299, .587, .114],[-.147, -.288, .436],[.615, -.515, -.1]]).dot(img).reshape((3,image.shape[0],image.shape[1]))\n",
     "    return luv.transpose(1,2,0)\n",
     "def luv2rgb(image):\n",
     "    # Approximate inverse of rgb2luv (YUV -> RGB).\n",
     "    img = image.transpose(2,0,1).reshape(3,-1)\n",
     "    rgb = np.array([[1, 0, 1.139],[1, -.395, -.580],[1, 2.03, 0]]).dot(img).reshape((3,image.shape[0],image.shape[1]))\n",
     "    return rgb.transpose(1,2,0)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "## org net"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
     "cp_mode = 'match' #match|lum\n",
     "img_dirs = OrderedDict()\n",
     "img_dirs['content'] = photo_dir\n",
     "img_dirs['style'] = photo_dir\n",
     "img_names = OrderedDict()\n",
     "img_names['content'] = 'fig3_content.jpg'\n",
     "img_names['style'] = 'fig3_style1.jpg'\n",
     "# Content loss on a single deep layer, style loss on five layers.\n",
     "layers = OrderedDict()\n",
     "layers['content'] =  ['relu4_2']\n",
     "layers['style'] = ['relu1_1','relu2_1','relu3_1','relu4_1','relu5_1']\n",
     "layers_now = layers['style'] + layers['content']\n",
     "img_size = 512\n",
     "max_iter = 500\n",
     "hr_img_size = None  # None disables the high-resolution second pass\n",
     "hr_max_iter = 200\n",
     "gpu = 0\n",
     "init = 'image'  # 'image' starts from the content image, 'random' from noise\n",
     "model_name = 'org_pad'\n",
     "caffe_model = set_model(model_name, project_dir)\n",
     "input_file_name = tmp_dir + 'input_cc.hdf5'\n",
     "init_file_name = tmp_dir + 'init_cc.hdf5'\n",
     "output_file_name = tmp_dir + 'output_cc.hdf5'\n",
     "# Style weights are scaled per layer; the divisors look like squared\n",
     "# feature-map counts of the VGG layers -- confirm against the model.\n",
     "sw = 1e3\n",
     "cw = 1\n",
     "weights = OrderedDict()\n",
     "weights['style'] = [\n",
     "    [array([sw/64**2])],\n",
     "    [array([sw/128**2])],\n",
     "    [array([sw/256**2])],\n",
     "    [array([sw/512**2])],\n",
     "    [array([sw/512**2])],\n",
     "]\n",
     "weights['content'] = [[array([cw])]]\n",
     "loss_functions = [['GramMSE']] * len(layers['style']) + [['MSE']]\n",
     "lf = list2css(map(str,loss_functions));lf = str.replace(lf,'[','');lf = str.replace(lf,']','')\n",
     "# Result file name encodes the experiment parameters.\n",
     "result_image_name = (\n",
     "'cimg_' + img_names['content'] + \n",
     "'_simg_' + img_names['style'] + \n",
     "'_cpm_' + cp_mode + \n",
     "'_sz_' + str(img_size) + \n",
     "'_model_' + model_name + \n",
     "'_layers_' + list2css(layers_now) + \n",
     "# '_lf_' + lf +\n",
     "'_sw_' + '%.1E'%decimal.Decimal(sw) + \n",
     "'_cw_' + '%.1E'%decimal.Decimal(cw) + \n",
     "'_init_' + init +\n",
     "'.jpg'\n",
     ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "## norm net"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "cp_mode = 'lum'\n",
     "img_dirs = OrderedDict()\n",
     "img_dirs['content'] = photo_dir\n",
     "img_dirs['style'] = art_dir\n",
     "img_names = OrderedDict()\n",
     "img_names['content'] = 'fig3_content.jpg'\n",
     "img_names['style'] = 'fig3_style1.jpg'\n",
     "layers = OrderedDict()\n",
     "layers['content'] =  ['relu4_2']\n",
     "layers['style'] = ['relu1_1','relu2_1','relu3_1','relu4_1','relu5_1']\n",
     "layers_now = layers['style'] + layers['content']\n",
     "img_size = 512\n",
     "max_iter = 500\n",
     "hr_img_size = 1024  # enables the high-resolution second pass\n",
     "hr_max_iter = 200\n",
     "gpu = 0\n",
     "init = 'image'\n",
     "model_name = 'norm_pad'\n",
     "caffe_model = set_model(model_name, project_dir)\n",
     "input_file_name = tmp_dir + 'input_cc.hdf5'\n",
     "init_file_name = tmp_dir + 'init_cc.hdf5'\n",
     "output_file_name = tmp_dir + 'output_cc.hdf5'\n",
     "# For the normalised network all style layers share the same weight.\n",
     "sw = 1e9 / len(layers['style'])\n",
     "cw = 1e6\n",
     "ptw = 1e5  # NOTE(review): ptw appears unused anywhere in this notebook\n",
     "weights = OrderedDict()\n",
     "weights['style'] = [[array([sw])]] * len(layers['style'])\n",
     "weights['content'] = [[array([cw])]]\n",
     "loss_functions = [['GramMSE']] * len(layers['style']) + [['MSE']]\n",
     "lf = list2css(map(str,loss_functions));lf = str.replace(lf,'[','');lf = str.replace(lf,']','')\n",
     "# Result file name encodes the experiment parameters.\n",
     "result_image_name = (\n",
     "'cimg_' + img_names['content'] + \n",
     "'_simg_' + img_names['style'] + \n",
     "'_cpm_' + cp_mode + \n",
     "'_sz_' + str(img_size) + \n",
     "'_model_' + model_name + \n",
     "'_layers_' + list2css(layers_now) + \n",
     "# '_lf_' + lf +\n",
     "'_sw_' + '%.1E'%decimal.Decimal(sw) + \n",
     "'_cw_' + '%.1E'%decimal.Decimal(cw) + \n",
     "'_init_' + init +\n",
     "'.jpg'\n",
     ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false,
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "if os.path.isfile(result_dir + result_image_name) == False: #check if file exists\n",
    "    #get images\n",
    "    conditions = img_names.keys()\n",
    "    imgs = OrderedDict()\n",
    "    imgs_torch = OrderedDict()\n",
    "    act = OrderedDict()\n",
    "    for cond in conditions:\n",
    "        imgs[cond] = img_as_float(imread(img_dirs[cond] + img_names[cond]))\n",
    "        if imgs[cond].ndim == 2:\n",
    "            imgs[cond] = tile(imgs[cond][:,:,None],(1,1,3))\n",
    "        elif imgs[cond].shape[2] == 4:\n",
    "            imgs[cond] = imgs[cond][:,:,:3]\n",
    "        try:\n",
    "            imgs[cond] = transform.pyramid_reduce(imgs[cond], sqrt(float(imgs[cond][:,:,0].size) / img_size**2))\n",
    "        except:\n",
    "            print('no downsampling: ' + img_names[cond])\n",
    "        imshow(imgs[cond]);show()\n",
    "    #color preservation\n",
    "    if cp_mode == 'lum':\n",
    "        org_content = imgs['content'].copy()\n",
    "        for cond in conditions:\n",
    "            imgs[cond] = lum_transform(imgs[cond])\n",
    "        imgs['style'] -= imgs['style'].mean(0).mean(0)\n",
    "        imgs['style'] += imgs['content'].mean(0).mean(0)\n",
    "        for cond in conditions:\n",
    "            imgs[cond][imgs[cond]<0] = 0\n",
    "            imgs[cond][imgs[cond]>1] = 1\n",
    "    elif cp_mode =='match':\n",
    "        imgs['style'] = match_color(imgs['style'], imgs['content'], mode='pca')\n",
    "    elif cp_mode == 'match_style':\n",
    "        imgs['content'] = match_color(imgs['content'], imgs['style'], mode='pca')\n",
    "    else:\n",
    "        raise NameError('Unknown colour preservation mode')\n",
    "    for cond in conditions:\n",
    "        imgs_torch[cond] = preprocess(imgs[cond])\n",
    "        act[cond] = get_activations(imgs_torch[cond],\n",
    "                                    caffe_model,\n",
    "                                    layers=layers['style'],\n",
    "                                    gpu=gpu\n",
    "                                   )\n",
    "\n",
    "    if init == 'image':\n",
    "        make_torch_init(init_file_name, imgs_torch['content'])\n",
    "    elif init == 'random':\n",
    "        make_torch_init(init_file_name, randn(*imgs_torch['content'].shape))\n",
    "    else: \n",
    "        raise NameError('Unknown init')\n",
    "\n",
    "    args = OrderedDict()\n",
    "    args['style'] = [\n",
    "                        [\n",
    "                            {'targets': gram_matrix(act['style'][layer])[None,:],\n",
    "                             'weights': weights['style'][l][0]} \n",
    "                        ] \n",
    "                    for l,layer in enumerate(layers['style'])]\n",
    "    act['content'] = get_activations(imgs_torch['content'], caffe_model, layers=layers['content'], gpu=gpu)\n",
    "    args['content'] = [[{'targets': act['content'][layers['content'][0]][None,:],'weights': weights['content'][0][0]}],]                \n",
    "    args_now = args['style'] + args['content']\n",
    "    make_torch_input(input_file_name, layers_now, loss_functions, args_now)\n",
    "    context = {\n",
    "        'caffe_model': caffe_model,\n",
    "        'input_file': input_file_name,\n",
    "        'init_file': init_file_name,\n",
    "        'gpu': gpu,\n",
    "        'max_iter': max_iter,\n",
    "        'backend': 'cudnn',\n",
    "        'print_iter': 50,\n",
    "        'save_iter': 0,\n",
    "        'layer_order': list2css(layers_now),\n",
    "        'output_file': output_file_name,\n",
    "        'project_dir': project_dir\n",
    "    }\n",
    "    template = (\n",
    "                '#!/bin/bash\\n' +\n",
    "                'cd {project_dir} && ' + \n",
    "                'time /usr/local/torch/install/bin/th ImageSynthesis.lua ' + \n",
    "                '-caffe_model {caffe_model} ' +\n",
    "                '-input_file {input_file} ' + \n",
    "                '-init_file {init_file} ' + \n",
    "                '-gpu {gpu} ' + \n",
    "                '-max_iter {max_iter} ' +\n",
    "                '-print_iter {print_iter} ' +\n",
    "                '-save_iter {save_iter} ' +\n",
    "                '-backend {backend} ' + \n",
    "                '-layer_order {layer_order} ' +\n",
    "                '-output_file {output_file}'\n",
    "               )\n",
    "\n",
    "    script_name = project_dir + '/run_synthesis.sh'\n",
    "    with open(script_name, 'w') as script:\n",
    "        script.write(template.format(**context))\n",
    "    os.chmod(script_name, 0o755)\n",
    "    #execute script \n",
    "    !{script_name}\n",
    "    output = deprocess(get_torch_output(output_file_name))\n",
    "    if cp_mode == 'lum':\n",
    "        org_content = rgb2luv(org_content)\n",
    "        org_content[:,:,0] = output.mean(2)\n",
    "        output = luv2rgb(org_content)\n",
    "        output[output<0] = 0\n",
    "        output[output>1]=1\n",
    "    imshow(output);gcf().set_size_inches(8,14);show()\n",
    "    imsave(result_dir + result_image_name, output)\n",
    "\n",
    "#Make Highres\n",
    "if hr_img_size: \n",
    "    lr_output = img_as_float(imread(result_dir + result_image_name))\n",
    "    result_image_name = (\n",
    "    'cimg_' + img_names['content'] + \n",
    "    '_simg_' + img_names['style'] + \n",
    "    '_cpm_' + cp_mode + \n",
    "    '_sz_' + str(img_size) + \n",
    "    '_hrsz_' + str(hr_img_size) + \n",
    "    '_model_' + model_name + \n",
    "    '_layers_' + list2css(layers_now) + \n",
    "#     '_lf_' + lf +\n",
    "    '_sw_' + '%.1E'%decimal.Decimal(sw) + \n",
    "    '_cw_' + '%.1E'%decimal.Decimal(cw) +  \n",
    "    '_init_' + init +\n",
    "    '.jpg'\n",
    "    )\n",
    "    if os.path.isfile(result_dir + result_image_name) == False: #check if file exists\n",
    "        #get images\n",
    "        conditions = img_names.keys()\n",
    "        imgs = OrderedDict()\n",
    "        imgs_torch = OrderedDict()\n",
    "        act = OrderedDict()\n",
    "        for cond in conditions:\n",
    "            imgs[cond] = img_as_float(imread(img_dirs[cond] + img_names[cond]))\n",
    "            if imgs[cond].ndim == 2:\n",
    "                imgs[cond] = tile(imgs[cond][:,:,None],(1,1,3))\n",
    "            elif imgs[cond].shape[2] == 4:\n",
    "                imgs[cond] = imgs[cond][:,:,:3]\n",
    "            try:\n",
    "                imgs[cond] = transform.pyramid_reduce(imgs[cond], sqrt(float(imgs[cond][:,:,0].size) / hr_img_size**2))\n",
    "            except:\n",
    "                print('no downsampling: ' + img_names[cond])\n",
    "            imshow(imgs[cond]);show()\n",
    "        #color preservation\n",
    "        if cp_mode == 'lum':\n",
    "            org_content = imgs['content'].copy()\n",
    "            for cond in conditions:\n",
    "                imgs[cond] = lum_transform(imgs[cond])\n",
    "            imgs['style'] -= imgs['style'].mean(0).mean(0)\n",
    "            imgs['style'] += imgs['content'].mean(0).mean(0)\n",
    "            for cond in conditions:\n",
    "                imgs[cond][imgs[cond]<0] =0\n",
    "                imgs[cond][imgs[cond]>1] =1\n",
    "        elif cp_mode =='match':\n",
    "            imgs['style'] = match_color(imgs['style'], imgs['content'], mode='pca')\n",
    "        elif cp_mode == 'match_style':\n",
    "            imgs['content'] = match_color(imgs['content'], imgs['style'], mode='pca')\n",
    "        else:\n",
    "            raise NameError('Unknown colour preservation mode')\n",
    "        for cond in conditions:\n",
    "            imgs_torch[cond] = preprocess(imgs[cond])\n",
    "            act[cond] = get_activations(imgs_torch[cond],\n",
    "                                        caffe_model,\n",
    "                                        layers=layers['style'],\n",
    "                                        gpu=gpu\n",
    "                                       )\n",
    "\n",
    "        hr_init = img_as_float(scipy.misc.imresize(lr_output, imgs['content'].shape))\n",
    "        if cp_mode == 'lum':\n",
    "            hr_init = lum_transform(hr_init)\n",
    "        hr_init = preprocess(hr_init)\n",
    "        make_torch_init(init_file_name, hr_init)\n",
    "\n",
    "        args = OrderedDict()\n",
    "        args['style'] = [\n",
    "                            [\n",
    "                                {'targets': gram_matrix(act['style'][layer])[None,:],\n",
    "                                 'weights': weights['style'][l][0]} \n",
    "                            ] \n",
    "                        for l,layer in enumerate(layers['style'])]\n",
    "        act['content'] = get_activations(imgs_torch['content'], caffe_model, layers=layers['content'], gpu=gpu)\n",
    "        args['content'] = [[{'targets': act['content'][layers['content'][0]][None,:],'weights': weights['content'][0][0]}],]                \n",
    "        layers_now = layers['style'] + layers['content']\n",
    "        args_now = args['style'] + args['content']\n",
    "\n",
    "        make_torch_input(input_file_name, layers_now, loss_functions, args_now)\n",
    "        context = {\n",
    "            'caffe_model': caffe_model,\n",
    "            'input_file': input_file_name,\n",
    "            'init_file': init_file_name,\n",
    "            'gpu': gpu,\n",
    "            'max_iter': hr_max_iter,\n",
    "            'backend': 'cudnn',\n",
    "            'print_iter': 50,\n",
    "            'save_iter': 0,\n",
    "            'layer_order': list2css(layers_now),\n",
    "            'output_file': output_file_name,\n",
    "            'project_dir': project_dir\n",
    "        }\n",
    "        template = (\n",
    "                    '#!/bin/bash\\n' +\n",
    "                    'cd {project_dir} && ' + \n",
    "                    'time /usr/local/torch/install/bin/th ImageSynthesis.lua ' + \n",
    "                    '-caffe_model {caffe_model} ' +\n",
    "                    '-input_file {input_file} ' + \n",
    "                    '-init_file {init_file} ' + \n",
    "                    '-gpu {gpu} ' + \n",
    "                    '-max_iter {max_iter} ' +\n",
    "                    '-print_iter {print_iter} ' +\n",
    "                    '-save_iter {save_iter} ' +\n",
    "                    '-backend {backend} ' + \n",
    "                    '-layer_order {layer_order} ' +\n",
    "                    '-output_file {output_file}'\n",
    "                   )\n",
    "\n",
    "        script_name = project_dir + '/run_synthesis.sh'\n",
    "        with open(script_name, 'w') as script:\n",
    "            script.write(template.format(**context))\n",
    "        os.chmod(script_name, 0o755)\n",
    "        #execute script \n",
    "        !{script_name}\n",
    "        output = deprocess(get_torch_output(output_file_name))\n",
    "        if cp_mode == 'lum':\n",
    "            org_content = rgb2luv(org_content)\n",
    "            org_content[:,:,0] = output.mean(2)\n",
    "            output = luv2rgb(org_content)\n",
    "            output[output<0] = 0\n",
    "            output[output>1]=1\n",
    "        imshow(output);gcf().set_size_inches(8,14);show()\n",
    "        imsave(result_dir + result_image_name, output)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
