{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# %pylab pulls numpy and matplotlib wholesale into the global namespace\n",
    "# (array, zeros, randn, imshow, clip, ...); the rest of the notebook relies\n",
    "# on these wildcard names.\n",
    "%pylab inline\n",
    "import scipy\n",
    "import h5py\n",
    "import skimage\n",
    "import os\n",
    "from skimage import io,transform,img_as_float\n",
    "from skimage.io import imread,imsave\n",
    "from collections import OrderedDict\n",
    "import decimal\n",
    "# Directory layout: Results/Images holds final outputs, Tmp holds the hdf5\n",
    "# files exchanged with the torch scripts; project root is the notebook's parent.\n",
    "notebook_dir = os.getcwd()\n",
    "project_dir = os.path.split(notebook_dir)[0]\n",
    "result_dir = project_dir + '/Results/Images/'\n",
    "if not os.path.isdir(result_dir):\n",
    "    os.makedirs(result_dir)\n",
    "tmp_dir = project_dir + '/Tmp/'\n",
    "if not os.path.isdir(tmp_dir):\n",
    "    os.makedirs(tmp_dir)\n",
    "photo_dir = project_dir + '/Images/ControlPaper/'\n",
    "art_dir = project_dir + '/Images/ControlPaper/'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def make_torch_input(filename, layers, loss_functions, args):\n",
    "    '''\n",
    "    Write the nested layer / loss-function / argument structure to an hdf5\n",
    "    file that the torch synthesis script reads as its input specification.\n",
    "    '''\n",
    "    f = h5py.File(filename,'w')\n",
    "    for l,layer in enumerate(layers):\n",
    "        layer_group = f.create_group(layer)\n",
    "        for lf,loss_function in enumerate(loss_functions[l]):\n",
    "            lf_group = layer_group.create_group(loss_function)\n",
    "            for arg in args[l][lf]:\n",
    "                # return value of create_dataset is not needed\n",
    "                lf_group.create_dataset(arg, data=args[l][lf][arg])\n",
    "    f.close()\n",
    "    \n",
    "def make_torch_init(filename, init):\n",
    "    '''\n",
    "    Write the initial image for the torch optimisation to an hdf5 file.\n",
    "    '''\n",
    "    f = h5py.File(filename,'w')\n",
    "    f.create_dataset('init', data=init)\n",
    "    f.close()\n",
    "\n",
    "def get_torch_output(filename):\n",
    "    '''\n",
    "    Read the optimisation result written by the torch script.\n",
    "    Note: the original returned before f.close(), leaking the file handle;\n",
    "    the context manager closes it, and [()] copies the data out first\n",
    "    (Dataset.value is deprecated in h5py).\n",
    "    '''\n",
    "    with h5py.File(filename,'r') as f:\n",
    "        data = f['opt_result'][()]\n",
    "    return data\n",
    "\n",
    "def get_torch_loss(filename):\n",
    "    '''\n",
    "    Read the recorded losses written by the torch script (same fix as above).\n",
    "    '''\n",
    "    with h5py.File(filename,'r') as f:\n",
    "        data = f['losses'][()]\n",
    "    return data\n",
    "\n",
    "def list2css(layers):\n",
    "    '''\n",
    "    Takes list of strings and returns comma separated string\n",
    "    '''\n",
    "    return ','.join(str(l) for l in layers)\n",
    "\n",
    "def get_activations(images, caffe_model, layers='all', gpu=0):\n",
    "    '''\n",
    "    Function to get neural network activations in response to images from torch.\n",
    "    \n",
    "    :param images: array of images\n",
    "    :param caffe_model: file name of the network .caffemodel file\n",
    "    :param layers: network layers for which the activations should be computed\n",
    "    :param gpu: id of the gpu torch should run on\n",
    "    :return: network activations in response to images\n",
    "    '''\n",
    "    layers = list2css(layers)\n",
    "    # images are handed to torch via a temporary hdf5 file in tmp_dir\n",
    "    images_file_name = tmp_dir + 'images.hdf5'\n",
    "    output_file_name = tmp_dir + 'activations.hdf5'\n",
    "    f = h5py.File(images_file_name, 'w')\n",
    "    f.create_dataset('images', data=images)\n",
    "    f.close()\n",
    "    context = {\n",
    "    'caffe_model': caffe_model,\n",
    "    'images': images_file_name,\n",
    "    'layers': layers,\n",
    "    'gpu': gpu,\n",
    "    'backend': 'cudnn',\n",
    "    'output_file': output_file_name,\n",
    "    'project_dir': project_dir\n",
    "    }\n",
    "    # shell script that invokes the torch side (ComputeActivations.lua)\n",
    "    template = ('#!/bin/bash\\n' +\n",
    "                'cd {project_dir} && ' + \n",
    "                '/usr/local/torch/install/bin/th ComputeActivations.lua ' + \n",
    "                '-caffe_model {caffe_model} ' +\n",
    "                '-images {images} ' + \n",
    "                '-layers {layers} ' + \n",
    "                '-gpu {gpu} ' + \n",
    "                '-backend {backend} ' +\n",
    "                '-output_file {output_file}')\n",
    "    script_name = project_dir + '/get_activations.sh'\n",
    "    with open(script_name, 'w') as script:\n",
    "        script.write(template.format(**context))\n",
    "    os.chmod(script_name, 0o755)\n",
    "    #execute script \n",
    "    !{script_name} >/dev/null\n",
    "    # read activations back; .copy() keeps the data alive after f.close()\n",
    "    # NOTE(review): Dataset.value is deprecated in h5py>=2.9; f[key][()] is the replacement\n",
    "    f = h5py.File(output_file_name,'r')\n",
    "    act = OrderedDict()\n",
    "    for key in f.keys():\n",
    "        act[key] = f[key].value.copy()\n",
    "    f.close()\n",
    "    return act\n",
    "\n",
    "def preprocess(image):\n",
    "    '''\n",
    "    Convert an RGB float image in [0,1] to the torch/caffe input format:\n",
    "    channel order reversed, ImageNet mean subtracted, scaled by 255, (ch,h,w).\n",
    "    '''\n",
    "    # small tolerance for float rounding; the original 1e3 made the check vacuous\n",
    "    assert(image.max() <= (1. + 1e-3))\n",
    "    imagenet_mean = array([0.40760392,  0.45795686,  0.48501961])\n",
    "    image_torch = 255 * (image[:,:,::-1] - imagenet_mean).transpose(2,0,1)\n",
    "    return image_torch\n",
    "\n",
    "def deprocess(image_torch):\n",
    "    '''\n",
    "    Invert preprocess: map a (ch,h,w) torch image back to an RGB float image,\n",
    "    clamped to [0,1].\n",
    "    '''\n",
    "    imagenet_mean = array([0.40760392,  0.45795686,  0.48501961])\n",
    "    image = (image_torch.transpose(1,2,0)/255. + imagenet_mean)[:,:,::-1]\n",
    "    return clip(image, 0., 1.)\n",
    "\n",
    "def gram_matrix(activations):\n",
    "    '''\n",
    "    Gram matrix of activations of shape (n_fm, h, w), normalised by the\n",
    "    number of spatial positions. Returns an (n_fm, n_fm) array.\n",
    "    '''\n",
    "    n_fm = activations.shape[0]\n",
    "    F = activations.reshape(n_fm,-1)\n",
    "    G = F.dot(F.T) / F[0,:].size\n",
    "    return G\n",
    "\n",
    "def gram_matrix_guided(activations, guides):\n",
    "    '''\n",
    "    guides is array of dimensions (n_ch,h,w) that defines n_ch guidance channels\n",
    "    guides should be normalised as: guides = guides / np.sqrt(np.diag(gram_matrix(guides)))[:,None,None]\n",
    "    activations are of dimensions (n_fm,h,w), the n_fm feature maps of a CNN layer\n",
    "    Output are n_ch gram matrices, that were computed with the feature maps weighted by the guidance channel\n",
    "    '''\n",
    "    assert (guides.sum(1).sum(1)==0).sum()==0, 'Dont use empty guidance channels'\n",
    "    assert guides.dtype == float, 'Guides should be float'\n",
    "    n_fm = activations.shape[0]\n",
    "    n_ch = guides.shape[0]\n",
    "    G = np.zeros((n_ch,n_fm,n_fm))\n",
    "    for c in range(n_ch):\n",
    "        # weight every feature map by guidance channel c, then form the Gram matrix\n",
    "        F = (activations * guides[c,:,:]).reshape(n_fm,-1)\n",
    "        G[c,:,:] = F.dot(F.T) / F[0,:].size\n",
    "    return G\n",
    "\n",
    "def mean_fm_guided(activations, guides):\n",
    "    '''\n",
    "    Mean of each feature map of activations (n_fm,h,w), weighted by each\n",
    "    guidance channel in guides (n_ch,h,w). Returns an (n_ch,n_fm) array.\n",
    "    '''\n",
    "    assert (guides.sum(1).sum(1)==0).sum()==0, 'Dont use empty guidance channels'\n",
    "    assert guides.dtype == float, 'Guides should be float'\n",
    "    n_fm = activations.shape[0]\n",
    "    n_ch = guides.shape[0]\n",
    "    M = np.zeros((n_ch,n_fm))\n",
    "    for c in range(n_ch):\n",
    "        F = (activations * guides[c,:,:]).reshape(n_fm,-1)\n",
    "        M[c,:] = F.mean(1)\n",
    "    return M\n",
    "\n",
    "import itertools\n",
    "def flatten(l):\n",
    "    # flatten one level of nesting: [[a,b],[c]] -> [a,b,c]\n",
    "    return list(itertools.chain.from_iterable(l))\n",
    "\n",
    "def set_model(name, project_dir):\n",
    "    '''\n",
    "    Resolve a short model name to the path of its .caffemodel file.\n",
    "    '''\n",
    "    model_files = {\n",
    "        'org_pad': '/Models/VGG_ILSVRC_19_layers_conv.caffemodel',\n",
    "        'norm_pad': '/Models/vgg_normalised.caffemodel',\n",
    "    }\n",
    "    assert name in model_files, 'unknown model name'\n",
    "    return project_dir + model_files[name]\n",
    "    \n",
    "def get_fm_guides(guides, caffe_model, layers, mode='simple', k=0, th=.5, batch_size=2):\n",
    "    '''\n",
    "    Function to get guides on the feature maps of layers given guides on the pixels\n",
    "    \n",
    "    :param guides: the pixel guides, array of shape (h,w,n_guides)\n",
    "    :param caffe_model: the network model to compute the guides for\n",
    "    :param layers: the layers on which to compute the guides\n",
    "    :param mode: the mode of obtaining the feature map guide: simple|all|inside, downsampling|all neurons that see region| neurons that see only region \n",
    "    :param k: optionally can extend or reduce fm guides by k pixels \n",
    "    :param th: threshold to make guides binary, only used for modes all|inside \n",
    "    :param batch_size: batch_size for probing which neurons see the guide\n",
    "    :return: OrderedDict mapping layer name -> guide array of shape (n_guides, fm_h, fm_w)\n",
    "    '''\n",
    "    fm_guides = OrderedDict()\n",
    "    if mode=='simple':\n",
    "        # probe with a random image only to learn the per-layer feature map shapes,\n",
    "        # then resample the pixel guides down to those shapes\n",
    "        probe_image = zeros((3,) + guides.shape[:-1])\n",
    "        probe_image += 1e2 * randn(*probe_image.shape)\n",
    "        feature_maps = get_activations(probe_image, caffe_model, layers=layers)\n",
    "        for layer in layers:\n",
    "            # per-axis zoom factors; sf[0]=1 keeps the number of guide channels fixed\n",
    "            sf = asarray(feature_maps[layer].shape).astype(float) / asarray(guides.transpose(2,0,1).shape)\n",
    "            sf[0] = 1\n",
    "            fm_guides[layer] = scipy.ndimage.zoom(guides.transpose(2,0,1), sf, mode='nearest')\n",
    "        if k != 0:\n",
    "            # binarise, then grow (k>0) or shrink (k<0) each guide by k pixels\n",
    "            n_guides = guides.shape[2]\n",
    "            for layer in layers:\n",
    "                fm_guides[layer][fm_guides[layer]<th] = 0\n",
    "                fm_guides[layer][fm_guides[layer]>th] = 1\n",
    "                if k>0:\n",
    "                    #extend fm guides\n",
    "                    for m in range(n_guides):\n",
    "                        fm_guides[layer][m,:,:] = (scipy.ndimage.filters.uniform_filter(fm_guides[layer][m,:,:],(2*k+1))>=(1./(2*k+1)**2)).astype(float)\n",
    "                elif k<0:\n",
    "                    #reduce fm guides\n",
    "                    for m in range(n_guides):\n",
    "                        fm_guides[layer][m,:,:][scipy.ndimage.filters.uniform_filter(fm_guides[layer][m,:,:],(2*(-k)+1))<(1-1./(2*(-k)+1)**2)] = 0.\n",
    "    else:\n",
    "        # modes all|inside: probe with noise restricted to the region (or its complement)\n",
    "        # and mark feature map positions whose response varies (or stays constant)\n",
    "        n_guides = guides.shape[2]\n",
    "        for m in range(n_guides):\n",
    "            guide = guides[:,:,m]\n",
    "            guide[guide<th] = 0\n",
    "            guide[guide>=th] = 1\n",
    "\n",
    "            if mode=='all':\n",
    "                probe_image = zeros((batch_size, 3,) + guide.shape)\n",
    "                probe_image[:,:,guide.astype(bool)] += 1e2 * randn(*probe_image[:,:,guide.astype(bool)].shape)\n",
    "                feature_maps = get_activations(probe_image, caffe_model, layers=layers)\n",
    "                for layer in layers:\n",
    "                    if m==0:\n",
    "                        fm_guides[layer] = []\n",
    "                    fm_guides[layer].append((feature_maps[layer].var(0).mean(0)!=0).astype(float))\n",
    "\n",
    "            elif mode=='inside':\n",
    "                inv_guide = guide.copy()-1\n",
    "                inv_guide *= -1\n",
    "                probe_image_out = zeros((batch_size, 3,) + inv_guide.shape)\n",
    "                probe_image_out[:,:,inv_guide.astype(bool)] += 1e2 * randn(*probe_image_out[:,:,inv_guide.astype(bool)].shape)\n",
    "                feature_maps_out = get_activations(probe_image_out, caffe_model, layers=layers)\n",
    "                for layer in layers:\n",
    "                    if m==0:\n",
    "                        fm_guides[layer] = []\n",
    "                    fm_guides[layer].append((feature_maps_out[layer].var(0).mean(0)==0).astype(float))\n",
    "        for layer in layers:\n",
    "            fm_guides[layer] = np.stack(fm_guides[layer])\n",
    "            \n",
    "        if k>0:\n",
    "            #extend fm guides\n",
    "            for layer in layers:\n",
    "                for m in range(n_guides):\n",
    "                    fm_guides[layer][m,:,:] = (scipy.ndimage.filters.uniform_filter(fm_guides[layer][m,:,:],(2*k+1))>=(1./(2*k+1)**2)).astype(float)\n",
    "        elif k<0:\n",
    "            #reduce fm guides\n",
    "            for layer in layers:\n",
    "                for m in range(n_guides):\n",
    "                    fm_guides[layer][m,:,:][scipy.ndimage.filters.uniform_filter(fm_guides[layer][m,:,:],(2*(-k)+1))<(1-1./(2*(-k)+1)**2)] = 0.\n",
    "    return fm_guides"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Guided Gram Matrices "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## One Style Image"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### org net"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "img_dirs = OrderedDict()\n",
    "img_names = OrderedDict()\n",
    "guide_dirs = OrderedDict()\n",
    "img_dirs['content'] = photo_dir\n",
    "img_dirs['style'] = art_dir\n",
    "img_names['style'] = 'fig2_style1.jpg'\n",
    "img_names['content'] = 'fig2_content.jpg'\n",
    "guide_dirs['content'] = photo_dir\n",
    "guide_dirs['style'] = art_dir\n",
    "regions = ['sky','nosky']\n",
    "layers = OrderedDict()\n",
    "layers['style'] = ['relu1_1','relu2_1','relu3_1','relu4_1','relu5_1']\n",
    "layers['content'] =  ['relu4_2']\n",
    "layers_now = layers['style'] + layers['content']\n",
    "loss_functions = [['GramMSEGuided']] * len(layers['style']) + [['MSE']]\n",
    "\n",
    "guide_mode = 'inside'\n",
    "k = 3\n",
    "th = .5 #threshold for guides after downsampling, only relevant for modes inside and all or k != 0\n",
    "img_size = 512\n",
    "hr_img_size = 1024\n",
    "max_iter = 500\n",
    "hr_max_iter = 200\n",
    "gpu = 0\n",
    "model_name = 'org_pad'\n",
    "caffe_model = set_model(model_name, project_dir)\n",
    "\n",
    "input_file_name = tmp_dir + 'input_sc.hdf5'\n",
    "init_file_name = tmp_dir + 'init_sc.hdf5'\n",
    "output_file_name = tmp_dir + 'output_sc.hdf5'\n",
    "\n",
    "n_ch = len(regions)\n",
    "sw = 1e3\n",
    "gw = [1./n_ch]*n_ch\n",
    "cw = 1\n",
    "weights = OrderedDict()\n",
    "weights['style'] = [\n",
    "    [array([sw/(64**2)]*n_ch)*array(gw)],\n",
    "    [array([sw/(128**2)]*n_ch)*array(gw)],\n",
    "    [array([sw/(256**2)]*n_ch)*array(gw)],\n",
    "    [array([sw/(512**2)]*n_ch)*array(gw)],\n",
    "    [array([sw/(512**2)]*n_ch)*array(gw)],\n",
    "]\n",
    "weights['content'] = [[array([cw])]]\n",
    "\n",
    "result_image_name = (\n",
    "'cimg_' + img_names['content'] + \n",
    "'_simg_' + img_names['style'] + \n",
    "'_regions_' + list2css(regions) + \n",
    "'_mm_' + str(guide_mode) + \n",
    "'_k_' + str(k) + \n",
    "'_sz_' + str(img_size) + \n",
    "'_model_' + model_name + \n",
    "'_sw_' + '%.1E'%decimal.Decimal(sw) + \n",
    "'_cw_' + '%.1E'%decimal.Decimal(cw) + \n",
    "'_gw_' + list2css(['%.1E'%decimal.Decimal(i) for i in gw]) + \n",
    "# '_gw_equal_' + '%.1E'%decimal.Decimal(gw[0]) +  \n",
    "'_layers_' + list2css(layers_now) + \n",
    "'_guided_gram' + \n",
    "'.jpg'\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false,
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "#check if image exists\n",
    "if os.path.isfile(result_dir + result_image_name) == False:\n",
    "    #get images\n",
    "    conditions = img_names.keys()\n",
    "    imgs = OrderedDict()\n",
    "    imgs_torch = OrderedDict()\n",
    "    act = OrderedDict()\n",
    "    guides = OrderedDict()\n",
    "    fm_guides = OrderedDict()\n",
    "    for cond in conditions:\n",
    "        imgs[cond] = img_as_float(imread(img_dirs[cond] + img_names[cond]))\n",
    "        try:\n",
    "            imgs[cond] = transform.pyramid_reduce(imgs[cond], sqrt(float(imgs[cond][:,:,0].size) / img_size**2))\n",
    "        except:\n",
    "            print('no downsampling: ' + img_names[cond])\n",
    "        imshow(imgs[cond]);show()\n",
    "        imgs_torch[cond] = preprocess(imgs[cond])\n",
    "        act[cond] = get_activations(imgs_torch[cond],\n",
    "                                    caffe_model,\n",
    "                                    layers=layers['style'],\n",
    "                                    gpu=gpu\n",
    "                                   )\n",
    "        #get guides\n",
    "        guides[cond] = []\n",
    "        for r,region in enumerate(regions):\n",
    "            print(region)\n",
    "            if region =='all': \n",
    "                guide = ones(imgs[cond].shape[:2])[:,:,None]\n",
    "            else:\n",
    "                # splitext, not rstrip('.jpg'): rstrip strips ANY trailing '.', 'j', 'p', 'g' chars\n",
    "                guide = img_as_float(imread(guide_dirs[cond] + os.path.splitext(img_names[cond])[0]+'_'+region+'.jpg'))\n",
    "                if guide.ndim==2:\n",
    "                    guide = guide[:,:,None]\n",
    "                else:\n",
    "                    guide = guide[:,:,:1]\n",
    "                try:\n",
    "                    guide = transform.pyramid_reduce(guide, sqrt(float(guide[:,:,0].size) / img_size**2))\n",
    "                except:\n",
    "                    pass\n",
    "            imshow(guide.squeeze());colorbar();show()\n",
    "            guides[cond].append(guide.copy())\n",
    "        guides[cond] = dstack(guides[cond])\n",
    "        # guides for all style featuremaps\n",
    "        fm_guides[cond] = get_fm_guides(guides[cond],caffe_model,layers=layers['style'],mode=guide_mode,th=th,k=k)\n",
    "        for layer in layers['style']: #normalise fm guides\n",
    "            fm_guides[cond][layer] = fm_guides[cond][layer]/sqrt(diag(gram_matrix(fm_guides[cond][layer])))[:,None,None]\n",
    "\n",
    "    make_torch_init(init_file_name, imgs_torch['content'])\n",
    "\n",
    "    args = OrderedDict()\n",
    "    args['style'] = [\n",
    "                        [\n",
    "                            {'targets': gram_matrix_guided(act['style'][layer], fm_guides['style'][layer]),\n",
    "                             'weights': weights['style'][l][0], \n",
    "                             'guides':  fm_guides['content'][layer]\n",
    "                            }\n",
    "                        ] \n",
    "                    for l,layer in enumerate(layers['style'])]\n",
    "\n",
    "    act['content'] = get_activations(imgs_torch['content'], caffe_model, layers=layers['content'], gpu=gpu)\n",
    "    args['content'] = [[{'targets': act['content'][layers['content'][0]][None,:],'weights': weights['content'][0][0]}],]                \n",
    "    args_now = args['style'] + args['content']\n",
    "\n",
    "    make_torch_input(input_file_name, layers_now, loss_functions, args_now)\n",
    "    context = {\n",
    "        'caffe_model': caffe_model,\n",
    "        'input_file': input_file_name,\n",
    "        'init_file': init_file_name,\n",
    "        'gpu': gpu,\n",
    "        'max_iter': max_iter,\n",
    "        'backend': 'cudnn',\n",
    "        'print_iter': 50,\n",
    "        'save_iter': 0,\n",
    "        'layer_order': list2css(layers_now),\n",
    "        'output_file': output_file_name,\n",
    "        'project_dir': project_dir\n",
    "    }\n",
    "    template = ('#!/bin/bash\\n' +\n",
    "                'cd {project_dir} && ' + \n",
    "                'time /usr/local/torch/install/bin/th ImageSynthesis.lua ' + \n",
    "                '-caffe_model {caffe_model} ' +\n",
    "                '-input_file {input_file} ' + \n",
    "                '-init_file {init_file} ' + \n",
    "                '-gpu {gpu} ' + \n",
    "                '-max_iter {max_iter} ' +\n",
    "                '-print_iter {print_iter} ' +\n",
    "                '-save_iter {save_iter} ' +\n",
    "                '-backend {backend} ' + \n",
    "                '-layer_order {layer_order} ' +\n",
    "                '-output_file {output_file}'\n",
    "               )\n",
    "\n",
    "    script_name = project_dir + '/run_synthesis.sh'\n",
    "    with open(script_name, 'w') as script:\n",
    "        script.write(template.format(**context))\n",
    "    os.chmod(script_name, 0o755)\n",
    "    #execute script\n",
    "    !{script_name}\n",
    "    output = deprocess(get_torch_output(output_file_name))\n",
    "    imshow(output);gcf().set_size_inches(8,14);show()\n",
    "    imsave(result_dir + result_image_name, output)\n",
    "\n",
    "#make it highres\n",
    "if hr_img_size:\n",
    "    result_image_name = (\n",
    "    'cimg_' + img_names['content'] + \n",
    "    '_simg_' + img_names['style'] + \n",
    "    '_regions_' + list2css(regions) + \n",
    "    '_mm_' + str(guide_mode) + \n",
    "    '_k_' + str(k) + \n",
    "    '_sz_' + str(img_size) + \n",
    "    '_hrsz_' + str(hr_img_size) + \n",
    "    '_model_' + model_name + \n",
    "    '_sw_' + '%.1E'%decimal.Decimal(sw) + \n",
    "    '_cw_' + '%.1E'%decimal.Decimal(cw) + \n",
    "    '_gw_' + list2css(['%.1E'%decimal.Decimal(i) for i in gw]) + \n",
    "    # '_gw_equal_' + '%.1E'%decimal.Decimal(gw[0]) +  \n",
    "    '_layers_' + list2css(layers_now) + \n",
    "    '_guided_gram' + \n",
    "    '.jpg'\n",
    "    )\n",
    "    #check if image exists\n",
    "    if os.path.isfile(result_dir + result_image_name) == False:\n",
    "        conditions = img_names.keys()\n",
    "        imgs = OrderedDict()\n",
    "        imgs_torch = OrderedDict()\n",
    "        act = OrderedDict()\n",
    "        guides = OrderedDict()\n",
    "        fm_guides = OrderedDict()\n",
    "        for cond in conditions:\n",
    "            imgs[cond] = img_as_float(imread(img_dirs[cond] + img_names[cond]))\n",
    "            try:\n",
    "                imgs[cond] = transform.pyramid_reduce(imgs[cond], sqrt(float(imgs[cond][:,:,0].size) / hr_img_size**2))\n",
    "            except:\n",
    "                print('no downsampling: ' + img_names[cond])\n",
    "            imshow(imgs[cond]);show()\n",
    "            imgs_torch[cond] = preprocess(imgs[cond])\n",
    "            act[cond] = get_activations(imgs_torch[cond],\n",
    "                                        caffe_model,\n",
    "                                        layers=layers['style'],\n",
    "                                        gpu=gpu\n",
    "                                       )\n",
    "            #get guides\n",
    "            guides[cond] = []\n",
    "            for r,region in enumerate(regions):\n",
    "                print(region)\n",
    "                if region =='all': \n",
    "                    guide = ones(imgs[cond].shape[:2])[:,:,None]\n",
    "                else:\n",
    "                    # splitext, not rstrip('.jpg'): rstrip strips ANY trailing '.', 'j', 'p', 'g' chars\n",
    "                    guide = img_as_float(imread(guide_dirs[cond] + os.path.splitext(img_names[cond])[0]+'_'+region+'.jpg'))\n",
    "                    if guide.ndim==2:\n",
    "                        guide = guide[:,:,None]\n",
    "                    else:\n",
    "                        guide = guide[:,:,:1]\n",
    "                    try:\n",
    "                        guide = transform.pyramid_reduce(guide, sqrt(float(guide[:,:,0].size) / hr_img_size**2))\n",
    "                    except:\n",
    "                        pass\n",
    "                imshow(guide.squeeze());colorbar();show()\n",
    "                guides[cond].append(guide.copy())\n",
    "            guides[cond] = dstack(guides[cond])\n",
    "            # guides for all style featuremaps\n",
    "            fm_guides[cond] = get_fm_guides(guides[cond],caffe_model,layers=layers['style'],mode=guide_mode,th=th,k=k)\n",
    "            for layer in layers['style']: #normalise fm guides\n",
    "                fm_guides[cond][layer] = fm_guides[cond][layer]/sqrt(diag(gram_matrix(fm_guides[cond][layer])))[:,None,None]\n",
    "\n",
    "        # NOTE(review): 'output' is only defined if the low-res synthesis above ran in\n",
    "        # this session; if the low-res result already existed on disk this raises NameError.\n",
    "        # NOTE(review): scipy.misc.imresize was removed in scipy>=1.3; consider\n",
    "        # skimage.transform.resize instead — confirm interpolation matches.\n",
    "        hr_init = preprocess(scipy.misc.imresize(output, imgs['content'].shape)/255.)\n",
    "        make_torch_init(init_file_name, hr_init)\n",
    "\n",
    "        args = OrderedDict()\n",
    "        args['style'] = [\n",
    "                            [\n",
    "                                {'targets': gram_matrix_guided(act['style'][layer], fm_guides['style'][layer]),\n",
    "                                 'weights': weights['style'][l][0], \n",
    "                                 'guides':  fm_guides['content'][layer]\n",
    "                                }\n",
    "                            ] \n",
    "                        for l,layer in enumerate(layers['style'])]\n",
    "\n",
    "        act['content'] = get_activations(imgs_torch['content'], caffe_model, layers=layers['content'], gpu=gpu)\n",
    "        args['content'] = [[{'targets': act['content'][layers['content'][0]][None,:],'weights': weights['content'][0][0]}],]                \n",
    "        layers_now = layers['style'] + layers['content']\n",
    "        loss_functions = [['GramMSEGuided']] * len(layers['style']) + [['MSE']]\n",
    "        args_now = args['style'] + args['content']\n",
    "\n",
    "        make_torch_input(input_file_name, layers_now, loss_functions, args_now)\n",
    "        context = {\n",
    "            'caffe_model': caffe_model,\n",
    "            'input_file': input_file_name,\n",
    "            'init_file': init_file_name,\n",
    "            'gpu': gpu,\n",
    "            'max_iter': hr_max_iter,\n",
    "            'backend': 'cudnn',\n",
    "            'print_iter': 50,\n",
    "            'save_iter': 0,\n",
    "            'layer_order': list2css(layers_now),\n",
    "            'output_file': output_file_name,\n",
    "            'project_dir': project_dir\n",
    "        }\n",
    "        template = ('#!/bin/bash\\n' +\n",
    "                    'cd {project_dir} && ' + \n",
    "                    'time /usr/local/torch/install/bin/th ImageSynthesis.lua ' + \n",
    "                    '-caffe_model {caffe_model} ' +\n",
    "                    '-input_file {input_file} ' + \n",
    "                    '-init_file {init_file} ' + \n",
    "                    '-gpu {gpu} ' + \n",
    "                    '-max_iter {max_iter} ' +\n",
    "                    '-print_iter {print_iter} ' +\n",
    "                    '-save_iter {save_iter} ' +\n",
    "                    '-backend {backend} ' + \n",
    "                    '-layer_order {layer_order} ' +\n",
    "                    '-output_file {output_file}'\n",
    "                   )\n",
    "\n",
    "        script_name = project_dir + '/run_synthesis.sh'\n",
    "        with open(script_name, 'w') as script:\n",
    "            script.write(template.format(**context))\n",
    "        os.chmod(script_name, 0o755)\n",
    "        #execute script\n",
    "        !{script_name}\n",
    "        output = deprocess(get_torch_output(output_file_name))\n",
    "        imshow(output);gcf().set_size_inches(8,14);show()\n",
    "        imsave(result_dir + result_image_name, output)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Multiple style images"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### org net"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "style_img_names = ['fig2_style1.jpg','fig2_style2.jpg']\n",
    "style_regions = [['nosky'],['sky']]\n",
    "img_dirs = OrderedDict()\n",
    "img_names = OrderedDict()\n",
    "guide_names = OrderedDict()\n",
    "guide_dirs = OrderedDict()\n",
    "img_dirs['content'] = photo_dir\n",
    "img_names['content'] = 'fig2_content.jpg'\n",
    "guide_dirs['content'] = photo_dir\n",
    "guide_names['content'] = ['nosky', 'sky']\n",
    "for n, name in enumerate(style_img_names):\n",
    "    img_names['style'+str(n)] = name\n",
    "    img_dirs['style'+str(n)] = art_dir\n",
    "    guide_dirs['style'+str(n)] = art_dir\n",
    "    guide_names['style'+str(n)] = style_regions[n]\n",
    "# (guide_names for the style images were already set in the loop above)\n",
    "assert(len(style_regions) == len(guide_names['content']))\n",
    "layers = OrderedDict()\n",
    "layers['style'] = ['relu1_1','relu2_1','relu3_1','relu4_1','relu5_1']\n",
    "layers['content'] =  ['relu4_2']\n",
    "layers_now = layers['style'] + layers['content']\n",
    "loss_functions = [['GramMSEGuided']] * len(layers['style']) + [['MSE']]\n",
    "\n",
    "guide_mode = 'simple'\n",
    "k = 0\n",
    "th = .5 #threshold for guides after downsampling\n",
    "img_size = 512\n",
    "hr_img_size = 1024\n",
    "max_iter = 500\n",
    "hr_max_iter = 200\n",
    "gpu = 0\n",
    "model_name = 'org_pad'\n",
    "caffe_model = set_model(model_name, project_dir)\n",
    "\n",
    "input_file_name = tmp_dir + 'input_sc.hdf5'\n",
    "init_file_name = tmp_dir + 'init_sc.hdf5'\n",
    "output_file_name = tmp_dir + 'output_sc.hdf5'\n",
    "n_ch = len(guide_names['content'])\n",
    "sw = 1e3\n",
    "gw = [1./n_ch]*n_ch\n",
    "# gw = [1./100,1./100,98./100]\n",
    "cw = 1\n",
    "weights = OrderedDict()\n",
    "weights['style'] = [\n",
    "    [array([sw/(64**2)]*n_ch)*array(gw)],\n",
    "    [array([sw/(128**2)]*n_ch)*array(gw)],\n",
    "    [array([sw/(256**2)]*n_ch)*array(gw)],\n",
    "    [array([sw/(512**2)]*n_ch)*array(gw)],\n",
    "    [array([sw/(512**2)]*n_ch)*array(gw)],\n",
    "]\n",
    "weights['content'] = [[array([cw])]]\n",
    "\n",
    "result_image_name = (\n",
    "'cimg_' + img_names['content'] + \n",
    "'_simg_' + list2css(style_img_names) + \n",
    "'_creg_' + list2css(guide_names['content']) +\n",
    "'_sreg_' + list2css([list2css(region) for region in style_regions]) + \n",
    "'_mm_' + str(guide_mode) + \n",
    "'_k_' + str(k) + \n",
    "'_sz_' + str(img_size) + \n",
    "'_model_' + model_name + \n",
    "'_sw_' + '%.1E'%decimal.Decimal(sw) + \n",
    "'_cw_' + '%.1E'%decimal.Decimal(cw) + \n",
    "# '_gw_' + list2css(['%.1E'%decimal.Decimal(i) for i in gw]) + \n",
    "'_gw_equal_' + '%.1E'%decimal.Decimal(gw[0]) +  \n",
    "'_layers_usual'+# + list2css(layers_now) + \n",
    "'_guided_gram' + \n",
    "'.jpg'\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false,
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "if os.path.isfile(result_dir + result_image_name) == False:\n",
    "    #get images\n",
    "    conditions = img_names.keys()\n",
    "    imgs = OrderedDict()\n",
    "    imgs_torch = OrderedDict()\n",
    "    act = OrderedDict()\n",
    "    guides = OrderedDict()\n",
    "    fm_guides = OrderedDict()\n",
    "    for cond in conditions:\n",
    "        imgs[cond] = img_as_float(imread(img_dirs[cond] + img_names[cond]))\n",
    "        try:\n",
    "            imgs[cond] = transform.pyramid_reduce(imgs[cond], sqrt(float(imgs[cond][:,:,0].size) / img_size**2))\n",
    "        except:\n",
    "            print('no downsampling: ' + img_names[cond])\n",
    "        imshow(imgs[cond]);show()\n",
    "        imgs_torch[cond] = preprocess(imgs[cond])\n",
    "        act[cond] = get_activations(imgs_torch[cond],\n",
    "                                    caffe_model,\n",
    "                                    layers=layers['style'],\n",
    "                                    gpu=gpu\n",
    "                                   )\n",
    "        #get guides\n",
    "        guides[cond] = []\n",
    "        for r,region in enumerate(guide_names[cond]):\n",
    "            print(region)\n",
    "            if region =='all': \n",
    "                guide = ones(imgs[cond].shape[:2])[:,:,None]\n",
    "            else:\n",
    "                # splitext, not rstrip('.jpg'): rstrip strips ANY trailing '.', 'j', 'p', 'g' chars\n",
    "                guide = img_as_float(imread(guide_dirs[cond] + os.path.splitext(img_names[cond])[0]+'_'+region+'.jpg'))\n",
    "                if guide.ndim==2:\n",
    "                    guide = guide[:,:,None]\n",
    "                else:\n",
    "                    guide = guide[:,:,:1]\n",
    "                try:\n",
    "                    guide = transform.pyramid_reduce(guide, sqrt(float(guide[:,:,0].size) / img_size**2))\n",
    "                except:\n",
    "                    pass\n",
    "            imshow(guide.squeeze());colorbar();show()\n",
    "            guides[cond].append(guide.copy())\n",
    "        guides[cond] = dstack(guides[cond])\n",
    "        # guides for all style featuremaps\n",
    "        fm_guides[cond] = get_fm_guides(guides[cond],caffe_model,layers=layers['style'],mode=guide_mode,th=th,k=k)\n",
    "        for layer in layers['style']: #normalise fm guides\n",
    "            fm_guides[cond][layer] = fm_guides[cond][layer]/sqrt(diag(gram_matrix(fm_guides[cond][layer])))[:,None,None]\n",
    "\n",
    "    make_torch_init(init_file_name, imgs_torch['content'])\n",
    "\n",
    "    args = OrderedDict()\n",
    "    args['style'] = [\n",
    "                        [\n",
    "                            {'targets': vstack([gram_matrix_guided(act['style'+str(s)][layer],\n",
    "                                                                   fm_guides['style'+str(s)][layer])\n",
    "                                                for s,_ in enumerate(style_img_names)]),\n",
    "                             'weights': weights['style'][l][0], \n",
    "                             'guides':  fm_guides['content'][layer]\n",
    "                            }\n",
    "                        ] \n",
    "                    for l,layer in enumerate(layers['style'])]\n",
    "\n",
    "    act['content'] = get_activations(imgs_torch['content'], caffe_model, layers=layers['content'], gpu=gpu)\n",
    "    args['content'] = [[{'targets': act['content'][layers['content'][0]][None,:],'weights': weights['content'][0][0]}],]                \n",
    "    args_now = args['style'] + args['content']\n",
    "\n",
    "    make_torch_input(input_file_name, layers_now, loss_functions, args_now)\n",
    "    context = {\n",
    "        'caffe_model': caffe_model,\n",
    "        'input_file': input_file_name,\n",
    "        'init_file': init_file_name,\n",
    "        'gpu': gpu,\n",
    "        'max_iter': max_iter,\n",
    "        'backend': 'cudnn',\n",
    "        'print_iter': 50,\n",
    "        'save_iter': 0,\n",
    "        'layer_order': list2css(layers_now),\n",
    "        'output_file': output_file_name,\n",
    "        'project_dir': project_dir\n",
    "    }\n",
    "    template = ('#!/bin/bash\\n' +\n",
    "                'cd {project_dir} && ' + \n",
    "                'time /usr/local/torch/install/bin/th ImageSynthesis.lua ' + \n",
    "                '-caffe_model {caffe_model} ' +\n",
    "                '-input_file {input_file} ' + \n",
    "                '-init_file {init_file} ' + \n",
    "                '-gpu {gpu} ' + \n",
    "                '-max_iter {max_iter} ' +\n",
    "                '-print_iter {print_iter} ' +\n",
    "                '-save_iter {save_iter} ' +\n",
    "                '-backend {backend} ' + \n",
    "                '-layer_order {layer_order} ' +\n",
    "                '-output_file {output_file}'\n",
    "               )\n",
    "\n",
    "    script_name = project_dir + '/run_synthesis.sh'\n",
    "    with open(script_name, 'w') as script:\n",
    "        script.write(template.format(**context))\n",
    "    os.chmod(script_name, 0o755)\n",
    "    #execute script\n",
    "    !{script_name}\n",
    "    output = deprocess(get_torch_output(output_file_name))\n",
    "    imshow(output);gcf().set_size_inches(8,14);show()\n",
    "    imsave(result_dir + result_image_name, output)\n",
    "    if hr_img_size:\n",
    "        #make highres\n",
    "        result_image_name = (\n",
    "        'cimg_' + img_names['content'] + \n",
    "        '_simg_' + list2css(style_img_names) + \n",
    "        '_creg_' + list2css(guide_names['content']) +\n",
    "        '_sreg_' + list2css([list2css(region) for region in style_regions]) + \n",
    "        '_mm_' + str(guide_mode) + \n",
    "        '_k_' + str(k) + \n",
    "        '_sz_' + str(img_size) + \n",
    "        '_hrsz_' + str(hr_img_size) + \n",
    "        '_model_' + model_name + \n",
    "        '_sw_' + '%.1E'%decimal.Decimal(sw) + \n",
    "        '_cw_' + '%.1E'%decimal.Decimal(cw) + \n",
    "        # '_gw_' + list2css(['%.1E'%decimal.Decimal(i) for i in gw]) + \n",
    "        '_gw_equal_' + '%.1E'%decimal.Decimal(gw[0]) +  \n",
    "        '_layers_usual'+# + list2css(layers_now) + \n",
    "        '_guided_gram' + \n",
    "        '.jpg'\n",
    "        )\n",
    "        if os.path.isfile(result_dir + result_image_name) == False:\n",
    "            #get images\n",
    "            conditions = img_names.keys()\n",
    "            imgs = OrderedDict()\n",
    "            imgs_torch = OrderedDict()\n",
    "            act = OrderedDict()\n",
    "            guides = OrderedDict()\n",
    "            fm_guides = OrderedDict()\n",
    "            for cond in conditions:\n",
    "                imgs[cond] = img_as_float(imread(img_dirs[cond] + img_names[cond]))\n",
    "                try:\n",
    "                    imgs[cond] = transform.pyramid_reduce(imgs[cond], sqrt(float(imgs[cond][:,:,0].size) / hr_img_size**2))\n",
    "                except:\n",
    "                    print('no downsampling: ' + img_names[cond])\n",
    "                imshow(imgs[cond]);show()\n",
    "                imgs_torch[cond] = preprocess(imgs[cond])\n",
    "                act[cond] = get_activations(imgs_torch[cond],\n",
    "                                            caffe_model,\n",
    "                                            layers=layers['style'],\n",
    "                                            gpu=gpu\n",
    "                                           )\n",
    "                #get guides\n",
    "                guides[cond] = []\n",
    "                for r,region in enumerate(guide_names[cond]):\n",
    "                    print(region)  # parenthesised: same output on Python 2 and 3\n",
    "                    if region =='all': \n",
    "                        guide = ones(imgs[cond].shape[:2])[:,:,None]\n",
    "                    else:\n",
    "                        # BUGFIX: use os.path.splitext instead of rstrip('.jpg'), which\n",
    "                        # strips a character set and can eat trailing 'j'/'p'/'g' from the stem.\n",
    "                        guide = img_as_float(imread(guide_dirs[cond] + os.path.splitext(img_names[cond])[0]+'_'+region+'.jpg'))\n",
    "                        if guide.ndim==2:\n",
    "                            guide = guide[:,:,None]\n",
    "                        else:\n",
    "                            guide = guide[:,:,:1]\n",
    "                        try:\n",
    "                            guide = transform.pyramid_reduce(guide, sqrt(float(guide[:,:,0].size) / hr_img_size**2))\n",
    "                        except:\n",
    "                            pass\n",
    "                    imshow(guide.squeeze());colorbar();show()\n",
    "                    guides[cond].append(guide.copy())\n",
    "                guides[cond] = dstack(guides[cond])\n",
    "                # guides for all style featuremaps\n",
    "                fm_guides[cond] = get_fm_guides(guides[cond],caffe_model,layers=layers['style'],mode=guide_mode,th=th,k=k)\n",
    "                for layer in layers['style']: #normalise fm guides\n",
    "                    fm_guides[cond][layer] = fm_guides[cond][layer]/sqrt(diag(gram_matrix(fm_guides[cond][layer])))[:,None,None]\n",
    "\n",
    "            # NOTE(review): scipy.misc.imresize is deprecated (removed in SciPy 1.3);\n",
    "            # a skimage.transform.resize port must also drop the /255. since it\n",
    "            # returns floats in [0,1] rather than uint8 — confirm before porting.\n",
    "            hr_init = preprocess(scipy.misc.imresize(output, imgs['content'].shape)/255.)\n",
    "            make_torch_init(init_file_name, hr_init)\n",
    "\n",
    "            args = OrderedDict()\n",
    "            args['style'] = [\n",
    "                                [\n",
    "                                    {'targets': vstack([gram_matrix_guided(act['style'+str(s)][layer],\n",
    "                                                                           fm_guides['style'+str(s)][layer])\n",
    "                                                        for s,_ in enumerate(style_img_names)]),\n",
    "                                     'weights': weights['style'][l][0], \n",
    "                                     'guides':  fm_guides['content'][layer]\n",
    "                                    }\n",
    "                                ] \n",
    "                            for l,layer in enumerate(layers['style'])]\n",
    "\n",
    "            act['content'] = get_activations(imgs_torch['content'], caffe_model, layers=layers['content'], gpu=gpu)\n",
    "            args['content'] = [[{'targets': act['content'][layers['content'][0]][None,:],'weights': weights['content'][0][0]}],]                \n",
    "            args_now = args['style'] + args['content']\n",
    "\n",
    "            make_torch_input(input_file_name, layers_now, loss_functions, args_now)\n",
    "            context = {\n",
    "                'caffe_model': caffe_model,\n",
    "                'input_file': input_file_name,\n",
    "                'init_file': init_file_name,\n",
    "                'gpu': gpu,\n",
    "                'max_iter': hr_max_iter,\n",
    "                'backend': 'cudnn',\n",
    "                'print_iter': 50,\n",
    "                'save_iter': 0,\n",
    "                'layer_order': list2css(layers_now),\n",
    "                'output_file': output_file_name,\n",
    "                'project_dir': project_dir\n",
    "            }\n",
    "            template = ('#!/bin/bash\\n' +\n",
    "                        'cd {project_dir} && ' + \n",
    "                        'time /usr/local/torch/install/bin/th ImageSynthesis.lua ' + \n",
    "                        '-caffe_model {caffe_model} ' +\n",
    "                        '-input_file {input_file} ' + \n",
    "                        '-init_file {init_file} ' + \n",
    "                        '-gpu {gpu} ' + \n",
    "                        '-max_iter {max_iter} ' +\n",
    "                        '-print_iter {print_iter} ' +\n",
    "                        '-save_iter {save_iter} ' +\n",
    "                        '-backend {backend} ' + \n",
    "                        '-layer_order {layer_order} ' +\n",
    "                        '-output_file {output_file}'\n",
    "                       )\n",
    "\n",
    "            script_name = project_dir + '/run_synthesis.sh'\n",
    "            with open(script_name, 'w') as script:\n",
    "                script.write(template.format(**context))\n",
    "            os.chmod(script_name, 0o755)\n",
    "            #execute script\n",
    "            !{script_name}\n",
    "            output = deprocess(get_torch_output(output_file_name))\n",
    "            imshow(output);gcf().set_size_inches(8,14);show()\n",
    "            imsave(result_dir + result_image_name, output)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Guided sums with Gram "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##  One style image"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### org net "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# --- Configuration: single-style guided synthesis (MeanMSEGuided + GramMSE) ---\n",
    "# Directories and file names for the content/style images and their region guides.\n",
    "img_dirs = OrderedDict()\n",
    "img_names = OrderedDict()\n",
    "guide_dirs = OrderedDict()\n",
    "img_dirs['content'] = photo_dir\n",
    "img_dirs['style'] = art_dir\n",
    "img_names['style'] = 'fig2_style1.jpg'\n",
    "img_names['content'] = 'fig2_content.jpg'\n",
    "guide_dirs['content'] = photo_dir\n",
    "guide_dirs['style'] = art_dir\n",
    "# Spatial regions; guide masks are read from files named '<image-stem>_<region>.jpg'.\n",
    "regions = ['sky','nosky']\n",
    "# VGG layer names used for the style and content losses.\n",
    "layers = OrderedDict()\n",
    "layers['style'] = ['relu1_1','relu2_1','relu3_1','relu4_1','relu5_1']\n",
    "layers['content'] =  ['relu4_2']\n",
    "layers_now = layers['style'] + layers['content']\n",
    "# Two style losses per style layer (guided mean + Gram), one MSE content loss.\n",
    "loss_functions = [['MeanMSEGuided', 'GramMSE']] * len(layers['style']) + [['MSE']]\n",
    "\n",
    "guide_mode = 'inside'\n",
    "k = 0\n",
    "th = .5 #threshold for guides after downsampling, only relevant for modes inside and all or k != 0\n",
    "img_size = 512\n",
    "hr_img_size = 1024\n",
    "max_iter = 500\n",
    "hr_max_iter = 200\n",
    "gpu = 0\n",
    "model_name = 'org_pad'\n",
    "caffe_model = set_model(model_name, project_dir)\n",
    "\n",
    "# HDF5 files exchanged with the Torch optimiser (written by make_torch_input/init,\n",
    "# read back by get_torch_output).\n",
    "input_file_name = tmp_dir + 'input_sc.hdf5'\n",
    "init_file_name = tmp_dir + 'init_sc.hdf5'\n",
    "output_file_name = tmp_dir + 'output_sc.hdf5'\n",
    "\n",
    "# Loss weights: mw pairs with the guided-mean term, sw with the Gram term,\n",
    "# cw with the content loss; gw gives per-region weights (uniform here).\n",
    "n_ch = len(regions)\n",
    "sw = 1e3\n",
    "mw = 1e9\n",
    "gw = [1./n_ch]*n_ch\n",
    "cw = 1\n",
    "weights = OrderedDict()\n",
    "# One [guided-mean weights, Gram weight] pair per style layer; the divisors\n",
    "# 64..512 are presumably the channel counts of the relu layers — TODO confirm.\n",
    "weights['style'] = [\n",
    "    [array([mw/(64**2)]*n_ch)*array(gw), array([sw/(64**2)])],\n",
    "    [array([mw/(128**2)]*n_ch)*array(gw), array([sw/(128**2)])],\n",
    "    [array([mw/(256**2)]*n_ch)*array(gw), array([sw/(256**2)])],\n",
    "    [array([mw/(512**2)]*n_ch)*array(gw), array([sw/(512**2)])],\n",
    "    [array([mw/(512**2)]*n_ch)*array(gw), array([sw/(512**2)])],\n",
    "]\n",
    "weights['content'] = [[array([cw])]]\n",
    "\n",
    "# Result file name encodes every hyper-parameter so finished runs can be skipped.\n",
    "result_image_name = (\n",
    "'cimg_' + img_names['content'] + \n",
    "'_simg_' + img_names['style'] + \n",
    "'_regions_' + list2css(regions) + \n",
    "'_mm_' + str(guide_mode) + \n",
    "'_k_' + str(k) + \n",
    "'_sz_' + str(img_size) + \n",
    "'_model_' + model_name + \n",
    "'_sw_' + '%.1E'%decimal.Decimal(sw) + \n",
    "'_mw_' + '%.1E'%decimal.Decimal(mw) +  \n",
    "'_cw_' + '%.1E'%decimal.Decimal(cw) +\n",
    "'_gw_' + list2css(['%.1E'%decimal.Decimal(i) for i in gw]) + \n",
    "# '_gw_equal_' + '%.1E'%decimal.Decimal(gw[0]) +  \n",
    "'_layers_' + list2css(layers_now) + \n",
    "'_gram_guided_mean' + \n",
    "'.jpg'\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false,
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "#check if image exists\n",
    "if os.path.isfile(result_dir + result_image_name) == False:\n",
    "    #get images\n",
    "    conditions = img_names.keys()\n",
    "    imgs = OrderedDict()\n",
    "    imgs_torch = OrderedDict()\n",
    "    act = OrderedDict()\n",
    "    guides = OrderedDict()\n",
    "    fm_guides = OrderedDict()\n",
    "    for cond in conditions:\n",
    "        imgs[cond] = img_as_float(imread(img_dirs[cond] + img_names[cond]))\n",
    "        try:\n",
    "            imgs[cond] = transform.pyramid_reduce(imgs[cond], sqrt(float(imgs[cond][:,:,0].size) / img_size**2))\n",
    "        except:\n",
    "            print('no downsampling: ' + img_names[cond])\n",
    "        imshow(imgs[cond]);show()\n",
    "        imgs_torch[cond] = preprocess(imgs[cond])\n",
    "        act[cond] = get_activations(imgs_torch[cond],\n",
    "                                    caffe_model,\n",
    "                                    layers=layers['style'],\n",
    "                                    gpu=gpu\n",
    "                                   )\n",
    "        #get guides\n",
    "        guides[cond] = []\n",
    "        for r,region in enumerate(regions):\n",
    "            print(region)  # parenthesised: same output on Python 2 and 3\n",
    "            if region =='all': \n",
    "                guide = ones(imgs[cond].shape[:2])[:,:,None]\n",
    "            else:\n",
    "                # BUGFIX: os.path.splitext drops the extension; rstrip('.jpg') strips a\n",
    "                # character set and can also eat trailing 'j'/'p'/'g' from the file stem.\n",
    "                guide = img_as_float(imread(guide_dirs[cond] + os.path.splitext(img_names[cond])[0]+'_'+region+'.jpg'))\n",
    "                if guide.ndim==2:\n",
    "                    guide = guide[:,:,None]\n",
    "                else:\n",
    "                    guide = guide[:,:,:1]\n",
    "                try:\n",
    "                    guide = transform.pyramid_reduce(guide, sqrt(float(guide[:,:,0].size) / img_size**2))\n",
    "                except:\n",
    "                    pass\n",
    "            imshow(guide.squeeze());colorbar();show()\n",
    "            guides[cond].append(guide.copy())\n",
    "        guides[cond] = dstack(guides[cond])\n",
    "        # guides for all style featuremaps\n",
    "        fm_guides[cond] = get_fm_guides(guides[cond],caffe_model,layers=layers['style'],mode=guide_mode,th=th,k=k)\n",
    "        for layer in layers['style']: #normalise fm guides\n",
    "            fm_guides[cond][layer] = fm_guides[cond][layer]/sqrt(diag(gram_matrix(fm_guides[cond][layer])))[:,None,None]\n",
    "\n",
    "    make_torch_init(init_file_name, imgs_torch['content'])\n",
    "\n",
    "    args = OrderedDict()\n",
    "    args['style'] = [\n",
    "                        [\n",
    "                            {'targets': mean_fm_guided(act['style'][layer], fm_guides['style'][layer]),\n",
    "                             'weights': weights['style'][l][0], \n",
    "                             'guides':  fm_guides['content'][layer]\n",
    "                            },\n",
    "                            {'targets': gram_matrix(act['style'][layer])[None,:],\n",
    "                             'weights': weights['style'][l][1]\n",
    "                            } \n",
    "                        ] \n",
    "                    for l,layer in enumerate(layers['style'])]\n",
    "\n",
    "    act['content'] = get_activations(imgs_torch['content'], caffe_model, layers=layers['content'], gpu=gpu)\n",
    "    args['content'] = [[{'targets': act['content'][layers['content'][0]][None,:],'weights': weights['content'][0][0]}],]                \n",
    "    args_now = args['style'] + args['content']\n",
    "\n",
    "    make_torch_input(input_file_name, layers_now, loss_functions, args_now)\n",
    "    context = {\n",
    "        'caffe_model': caffe_model,\n",
    "        'input_file': input_file_name,\n",
    "        'init_file': init_file_name,\n",
    "        'gpu': gpu,\n",
    "        'max_iter': max_iter,\n",
    "        'backend': 'cudnn',\n",
    "        'print_iter': 50,\n",
    "        'save_iter': 0,\n",
    "        'layer_order': list2css(layers_now),\n",
    "        'output_file': output_file_name,\n",
    "        'project_dir': project_dir\n",
    "    }\n",
    "    template = ('#!/bin/bash\\n' +\n",
    "                'cd {project_dir} && ' + \n",
    "                'time /usr/local/torch/install/bin/th ImageSynthesis.lua ' + \n",
    "                '-caffe_model {caffe_model} ' +\n",
    "                '-input_file {input_file} ' + \n",
    "                '-init_file {init_file} ' + \n",
    "                '-gpu {gpu} ' + \n",
    "                '-max_iter {max_iter} ' +\n",
    "                '-print_iter {print_iter} ' +\n",
    "                '-save_iter {save_iter} ' +\n",
    "                '-backend {backend} ' + \n",
    "                '-layer_order {layer_order} ' +\n",
    "                '-output_file {output_file}'\n",
    "               )\n",
    "\n",
    "    script_name = project_dir + '/run_synthesis.sh'\n",
    "    with open(script_name, 'w') as script:\n",
    "        script.write(template.format(**context))\n",
    "    os.chmod(script_name, 0o755)\n",
    "    #execute script\n",
    "    !{script_name}\n",
    "    output = deprocess(get_torch_output(output_file_name))\n",
    "    imshow(output);gcf().set_size_inches(8,14);show()\n",
    "    imsave(result_dir + result_image_name, output)\n",
    "\n",
    "#make it highres\n",
    "if hr_img_size:\n",
    "    result_image_name = (\n",
    "    'cimg_' + img_names['content'] + \n",
    "    '_simg_' + img_names['style'] + \n",
    "    '_regions_' + list2css(regions) + \n",
    "    '_mm_' + str(guide_mode) + \n",
    "    '_k_' + str(k) + \n",
    "    '_sz_' + str(img_size) + \n",
    "    '_hrsz_' + str(hr_img_size) + \n",
    "    '_model_' + model_name + \n",
    "    '_sw_' + '%.1E'%decimal.Decimal(sw) + \n",
    "    '_mw_' + '%.1E'%decimal.Decimal(mw) +  \n",
    "    '_cw_' + '%.1E'%decimal.Decimal(cw) +\n",
    "    '_gw_' + list2css(['%.1E'%decimal.Decimal(i) for i in gw]) + \n",
    "    # '_gw_equal_' + '%.1E'%decimal.Decimal(gw[0]) +  \n",
    "    '_layers_' + list2css(layers_now) + \n",
    "    '_gram_guided_mean' + \n",
    "    '.jpg'\n",
    "    )\n",
    "    #check if image exists\n",
    "    if os.path.isfile(result_dir + result_image_name) == False:\n",
    "        conditions = img_names.keys()\n",
    "        imgs = OrderedDict()\n",
    "        imgs_torch = OrderedDict()\n",
    "        act = OrderedDict()\n",
    "        guides = OrderedDict()\n",
    "        fm_guides = OrderedDict()\n",
    "        for cond in conditions:\n",
    "            imgs[cond] = img_as_float(imread(img_dirs[cond] + img_names[cond]))\n",
    "            try:\n",
    "                imgs[cond] = transform.pyramid_reduce(imgs[cond], sqrt(float(imgs[cond][:,:,0].size) / hr_img_size**2))\n",
    "            except:\n",
    "                print('no downsampling: ' + img_names[cond])\n",
    "            imshow(imgs[cond]);show()\n",
    "            imgs_torch[cond] = preprocess(imgs[cond])\n",
    "            act[cond] = get_activations(imgs_torch[cond],\n",
    "                                        caffe_model,\n",
    "                                        layers=layers['style'],\n",
    "                                        gpu=gpu\n",
    "                                       )\n",
    "            #get guides\n",
    "            guides[cond] = []\n",
    "            for r,region in enumerate(regions):\n",
    "                print(region)  # parenthesised: same output on Python 2 and 3\n",
    "                if region =='all': \n",
    "                    guide = ones(imgs[cond].shape[:2])[:,:,None]\n",
    "                else:\n",
    "                    # BUGFIX: use os.path.splitext instead of rstrip('.jpg') (character-set strip).\n",
    "                    guide = img_as_float(imread(guide_dirs[cond] + os.path.splitext(img_names[cond])[0]+'_'+region+'.jpg'))\n",
    "                    if guide.ndim==2:\n",
    "                        guide = guide[:,:,None]\n",
    "                    else:\n",
    "                        guide = guide[:,:,:1]\n",
    "                    try:\n",
    "                        guide = transform.pyramid_reduce(guide, sqrt(float(guide[:,:,0].size) / hr_img_size**2))\n",
    "                    except:\n",
    "                        pass\n",
    "                imshow(guide.squeeze());colorbar();show()\n",
    "                guides[cond].append(guide.copy())\n",
    "            guides[cond] = dstack(guides[cond])\n",
    "            # guides for all style featuremaps\n",
    "            fm_guides[cond] = get_fm_guides(guides[cond],caffe_model,layers=layers['style'],mode=guide_mode,th=th,k=k)\n",
    "            for layer in layers['style']: #normalise fm guides\n",
    "                fm_guides[cond][layer] = fm_guides[cond][layer]/sqrt(diag(gram_matrix(fm_guides[cond][layer])))[:,None,None]\n",
    "\n",
    "        # NOTE(review): scipy.misc.imresize is deprecated (removed in SciPy 1.3);\n",
    "        # a skimage.transform.resize port must also drop the /255. since it\n",
    "        # returns floats in [0,1] rather than uint8 — confirm before porting.\n",
    "        hr_init = preprocess(scipy.misc.imresize(output, imgs['content'].shape)/255.)\n",
    "        make_torch_init(init_file_name, hr_init)\n",
    "\n",
    "        args = OrderedDict()\n",
    "        args['style'] = [\n",
    "                            [\n",
    "                                {'targets': mean_fm_guided(act['style'][layer], fm_guides['style'][layer]),\n",
    "                                 'weights': weights['style'][l][0], \n",
    "                                 'guides':  fm_guides['content'][layer]\n",
    "                                },\n",
    "                                {'targets': gram_matrix(act['style'][layer])[None,:],\n",
    "                                 'weights': weights['style'][l][1]\n",
    "                                } \n",
    "                            ] \n",
    "                        for l,layer in enumerate(layers['style'])]\n",
    "\n",
    "        act['content'] = get_activations(imgs_torch['content'], caffe_model, layers=layers['content'], gpu=gpu)\n",
    "        args['content'] = [[{'targets': act['content'][layers['content'][0]][None,:],'weights': weights['content'][0][0]}],]                \n",
    "        args_now = args['style'] + args['content']\n",
    "\n",
    "        make_torch_input(input_file_name, layers_now, loss_functions, args_now)\n",
    "        context = {\n",
    "            'caffe_model': caffe_model,\n",
    "            'input_file': input_file_name,\n",
    "            'init_file': init_file_name,\n",
    "            'gpu': gpu,\n",
    "            'max_iter': hr_max_iter,\n",
    "            'backend': 'cudnn',\n",
    "            'print_iter': 50,\n",
    "            'save_iter': 0,\n",
    "            'layer_order': list2css(layers_now),\n",
    "            'output_file': output_file_name,\n",
    "            'project_dir': project_dir\n",
    "        }\n",
    "        template = ('#!/bin/bash\\n' +\n",
    "                    'cd {project_dir} && ' + \n",
    "                    'time /usr/local/torch/install/bin/th ImageSynthesis.lua ' + \n",
    "                    '-caffe_model {caffe_model} ' +\n",
    "                    '-input_file {input_file} ' + \n",
    "                    '-init_file {init_file} ' + \n",
    "                    '-gpu {gpu} ' + \n",
    "                    '-max_iter {max_iter} ' +\n",
    "                    '-print_iter {print_iter} ' +\n",
    "                    '-save_iter {save_iter} ' +\n",
    "                    '-backend {backend} ' + \n",
    "                    '-layer_order {layer_order} ' +\n",
    "                    '-output_file {output_file}'\n",
    "                   )\n",
    "\n",
    "        script_name = project_dir + '/run_synthesis.sh'\n",
    "        with open(script_name, 'w') as script:\n",
    "            script.write(template.format(**context))\n",
    "        os.chmod(script_name, 0o755)\n",
    "        #execute script\n",
    "        !{script_name}\n",
    "        output = deprocess(get_torch_output(output_file_name))\n",
    "        imshow(output);gcf().set_size_inches(8,14);show()\n",
    "        imsave(result_dir + result_image_name, output)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Multiple style images "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### org net\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "style_img_names = ['fig2_style1.jpg','fig2_style2.jpg']\n",
    "style_regions = [['nosky'],['sky']]\n",
    "img_dirs = OrderedDict()\n",
    "img_names = OrderedDict()\n",
    "guide_names = OrderedDict()\n",
    "guide_dirs = OrderedDict()\n",
    "img_dirs['content'] = photo_dir\n",
    "img_names['content'] = 'fig2_content.jpg'\n",
    "guide_dirs['content'] = photo_dir\n",
    "guide_names['content'] = ['nosky', 'sky']\n",
    "# Register per-style-image names, dirs and guide regions under keys 'style0', 'style1', ...\n",
    "for n, name in enumerate(style_img_names):\n",
    "    img_names['style'+str(n)] = name\n",
    "    img_dirs['style'+str(n)] = art_dir\n",
    "    guide_dirs['style'+str(n)] = art_dir\n",
    "    guide_names['style'+str(n)] = style_regions[n]\n",
    "# (Removed a redundant second loop that re-assigned guide_names['style'+str(r)] =\n",
    "#  style_regions[r] — the loop above already does exactly that assignment.)\n",
    "assert len(style_regions) == len(guide_names['content'])\n",
    "layers = OrderedDict()\n",
    "layers['style'] = ['relu1_1','relu2_1','relu3_1','relu4_1','relu5_1']\n",
    "layers['content'] =  ['relu4_2']\n",
    "layers_now = layers['style'] + layers['content']\n",
    "loss_functions = [['MeanMSEGuided','GramMSE']] * len(layers['style']) + [['MSE']]\n",
    "\n",
    "guide_mode = 'simple'\n",
    "k = 0\n",
    "th = .5 #threshold for guides after downsampling\n",
    "img_size = 512\n",
    "hr_img_size = 1024\n",
    "max_iter = 500\n",
    "hr_max_iter = 200\n",
    "gpu = 0\n",
    "model_name = 'org_pad'\n",
    "caffe_model = set_model(model_name, project_dir)\n",
    "\n",
    "input_file_name = tmp_dir + 'input_sc.hdf5'\n",
    "init_file_name = tmp_dir + 'init_sc.hdf5'\n",
    "output_file_name = tmp_dir + 'output_sc.hdf5'\n",
    "n_ch = len(guide_names['content'])\n",
    "mw = 1e8/2\n",
    "sw = 1e3/2\n",
    "gw = [1./n_ch]*n_ch\n",
    "# gw = [1./100,1./100,98./100]\n",
    "cw = 1\n",
    "weights = OrderedDict()\n",
    "weights['style'] = [\n",
    "    [array([mw/(64**2)]*n_ch)*array(gw), array([sw/(64**2)])],\n",
    "    [array([mw/(128**2)]*n_ch)*array(gw), array([sw/(128**2)])],\n",
    "    [array([mw/(256**2)]*n_ch)*array(gw), array([sw/(256**2)])],\n",
    "    [array([mw/(512**2)]*n_ch)*array(gw), array([sw/(512**2)])],\n",
    "    [array([mw/(512**2)]*n_ch)*array(gw), array([sw/(512**2)])],\n",
    "]\n",
    "weights['content'] = [[array([cw])]]\n",
    "\n",
    "result_image_name = (\n",
    "'cimg_' + img_names['content'] + \n",
    "'_simg_' + list2css(style_img_names) + \n",
    "'_creg_' + list2css(guide_names['content']) +\n",
    "'_sreg_' + list2css([list2css(region) for region in style_regions]) + \n",
    "'_mm_' + str(guide_mode) + \n",
    "'_k_' + str(k) + \n",
    "'_sz_' + str(img_size) + \n",
    "'_model_' + model_name + \n",
    "'_sw_' + '%.1E'%decimal.Decimal(sw) + \n",
    "'_mw_' + '%.1E'%decimal.Decimal(mw) +  \n",
    "'_cw_' + '%.1E'%decimal.Decimal(cw) +\n",
    "'_gw_' + list2css(['%.1E'%decimal.Decimal(i) for i in gw]) + \n",
    "'_layers_usual'+# + list2css(layers_now) + \n",
    "'_gram_guided_mean' + \n",
    "'.jpg'\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false,
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "if os.path.isfile(result_dir + result_image_name) == False:\n",
    "    #get images\n",
    "    conditions = img_names.keys()\n",
    "    imgs = OrderedDict()\n",
    "    imgs_torch = OrderedDict()\n",
    "    act = OrderedDict()\n",
    "    guides = OrderedDict()\n",
    "    fm_guides = OrderedDict()\n",
    "    for cond in conditions:\n",
    "        imgs[cond] = img_as_float(imread(img_dirs[cond] + img_names[cond]))\n",
    "        try:\n",
    "            imgs[cond] = transform.pyramid_reduce(imgs[cond], sqrt(float(imgs[cond][:,:,0].size) / img_size**2))\n",
    "        except:\n",
    "            print('no downsampling: ' + img_names[cond])\n",
    "        imshow(imgs[cond]);show()\n",
    "        imgs_torch[cond] = preprocess(imgs[cond])\n",
    "        act[cond] = get_activations(imgs_torch[cond],\n",
    "                                    caffe_model,\n",
    "                                    layers=layers['style'],\n",
    "                                    gpu=gpu\n",
    "                                   )\n",
    "        #get guides\n",
    "        guides[cond] = []\n",
    "        for r,region in enumerate(guide_names[cond]):\n",
    "            print(region)  # parenthesised: same output on Python 2 and 3\n",
    "            if region =='all': \n",
    "                guide = ones(imgs[cond].shape[:2])[:,:,None]\n",
    "            else:\n",
    "                # BUGFIX: os.path.splitext drops the extension; rstrip('.jpg') strips a\n",
    "                # character set and can also eat trailing 'j'/'p'/'g' from the file stem.\n",
    "                guide = img_as_float(imread(guide_dirs[cond] + os.path.splitext(img_names[cond])[0]+'_'+region+'.jpg'))\n",
    "                if guide.ndim==2:\n",
    "                    guide = guide[:,:,None]\n",
    "                else:\n",
    "                    guide = guide[:,:,:1]\n",
    "                try:\n",
    "                    guide = transform.pyramid_reduce(guide, sqrt(float(guide[:,:,0].size) / img_size**2))\n",
    "                except:\n",
    "                    pass\n",
    "            imshow(guide.squeeze());colorbar();show()\n",
    "            guides[cond].append(guide.copy())\n",
    "        guides[cond] = dstack(guides[cond])\n",
    "        # guides for all style featuremaps\n",
    "        fm_guides[cond] = get_fm_guides(guides[cond],caffe_model,layers=layers['style'],mode=guide_mode,th=th,k=k)\n",
    "        for layer in layers['style']: #normalise fm guides\n",
    "            fm_guides[cond][layer] = fm_guides[cond][layer]/sqrt(diag(gram_matrix(fm_guides[cond][layer])))[:,None,None]\n",
    "\n",
    "    make_torch_init(init_file_name, imgs_torch['content'])\n",
    "\n",
    "    args = OrderedDict()\n",
    "    args['style'] = [\n",
    "                        [\n",
    "                            {'targets': vstack([mean_fm_guided(act['style'+str(s)][layer],\n",
    "                                                                   fm_guides['style'+str(s)][layer])\n",
    "                                                for s,_ in enumerate(style_img_names)]),\n",
    "                             'weights': weights['style'][l][0], \n",
    "                             'guides':  fm_guides['content'][layer]\n",
    "                            },\n",
    "                            {'targets': vstack([gram_matrix(act['style'+str(s)][layer])[None,:]\n",
    "                                                for s,_ in enumerate(style_img_names)]).mean(0)[None,:],\n",
    "                             'weights': weights['style'][l][1]\n",
    "                            } \n",
    "                        ] \n",
    "                    for l,layer in enumerate(layers['style'])]\n",
    "\n",
    "    act['content'] = get_activations(imgs_torch['content'], caffe_model, layers=layers['content'], gpu=gpu)\n",
    "    args['content'] = [[{'targets': act['content'][layers['content'][0]][None,:],'weights': weights['content'][0][0]}],]                \n",
    "    args_now = args['style'] + args['content']\n",
    "\n",
    "    make_torch_input(input_file_name, layers_now, loss_functions, args_now)\n",
    "    context = {\n",
    "        'caffe_model': caffe_model,\n",
    "        'input_file': input_file_name,\n",
    "        'init_file': init_file_name,\n",
    "        'gpu': gpu,\n",
    "        'max_iter': max_iter,\n",
    "        'backend': 'cudnn',\n",
    "        'print_iter': 50,\n",
    "        'save_iter': 0,\n",
    "        'layer_order': list2css(layers_now),\n",
    "        'output_file': output_file_name,\n",
    "        'project_dir': project_dir\n",
    "    }\n",
    "    template = ('#!/bin/bash\\n' +\n",
    "                'cd {project_dir} && ' + \n",
    "                'time /usr/local/torch/install/bin/th ImageSynthesis.lua ' + \n",
    "                '-caffe_model {caffe_model} ' +\n",
    "                '-input_file {input_file} ' + \n",
    "                '-init_file {init_file} ' + \n",
    "                '-gpu {gpu} ' + \n",
    "                '-max_iter {max_iter} ' +\n",
    "                '-print_iter {print_iter} ' +\n",
    "                '-save_iter {save_iter} ' +\n",
    "                '-backend {backend} ' + \n",
    "                '-layer_order {layer_order} ' +\n",
    "                '-output_file {output_file}'\n",
    "               )\n",
    "\n",
    "    script_name = project_dir + '/run_synthesis.sh'\n",
    "    with open(script_name, 'w') as script:\n",
    "        script.write(template.format(**context))\n",
    "    os.chmod(script_name, 0o755)\n",
    "    #execute script\n",
    "    !{script_name}\n",
    "    output = deprocess(get_torch_output(output_file_name))\n",
    "    imshow(output);gcf().set_size_inches(8,14);show()\n",
    "    imsave(result_dir + result_image_name, output)\n",
    "    if hr_img_size:\n",
    "        #make highres\n",
    "        result_image_name = (\n",
    "        'cimg_' + img_names['content'] + \n",
    "        '_simg_' + list2css(style_img_names) + \n",
    "        '_creg_' + list2css(guide_names['content']) +\n",
    "        '_sreg_' + list2css([list2css(region) for region in style_regions]) + \n",
    "        '_mm_' + str(guide_mode) + \n",
    "        '_k_' + str(k) + \n",
    "        '_sz_' + str(img_size) + \n",
    "        '_hrsz_' + str(hr_img_size) + \n",
    "        '_model_' + model_name + \n",
    "        '_sw_' + '%.1E'%decimal.Decimal(sw) + \n",
    "        '_mw_' + '%.1E'%decimal.Decimal(mw) +  \n",
    "        '_cw_' + '%.1E'%decimal.Decimal(cw) +\n",
    "        '_gw_' + list2css(['%.1E'%decimal.Decimal(i) for i in gw]) + \n",
    "        '_layers_usual'+# + list2css(layers_now) + \n",
    "        '_gram_guided_mean' + \n",
    "        '.jpg'\n",
    "        )\n",
    "        if os.path.isfile(result_dir + result_image_name) == False:\n",
    "            #get images\n",
    "            conditions = img_names.keys()\n",
    "            imgs = OrderedDict()\n",
    "            imgs_torch = OrderedDict()\n",
    "            act = OrderedDict()\n",
    "            guides = OrderedDict()\n",
    "            fm_guides = OrderedDict()\n",
    "            for cond in conditions:\n",
    "                imgs[cond] = img_as_float(imread(img_dirs[cond] + img_names[cond]))\n",
    "                try:\n",
    "                    imgs[cond] = transform.pyramid_reduce(imgs[cond], sqrt(float(imgs[cond][:,:,0].size) / hr_img_size**2))\n",
    "                except:\n",
    "                    print('no downsampling: ' + img_names[cond])\n",
    "                imshow(imgs[cond]);show()\n",
    "                imgs_torch[cond] = preprocess(imgs[cond])\n",
    "                act[cond] = get_activations(imgs_torch[cond],\n",
    "                                            caffe_model,\n",
    "                                            layers=layers['style'],\n",
    "                                            gpu=gpu\n",
    "                                           )\n",
    "                #get guides\n",
    "                guides[cond] = []\n",
    "                for r,region in enumerate(guide_names[cond]):\n",
    "                    print(region)\n",
    "                    if region =='all': \n",
    "                        guide = ones(imgs[cond].shape[:2])[:,:,None]\n",
    "                    else:\n",
    "                        guide = img_as_float(imread(guide_dirs[cond] + os.path.splitext(img_names[cond])[0]+'_'+region+'.jpg'))\n",
    "                        if guide.ndim==2:\n",
    "                            guide = guide[:,:,None]\n",
    "                        else:\n",
    "                            guide = guide[:,:,:1]\n",
    "                        try:\n",
    "                            guide = transform.pyramid_reduce(guide, sqrt(float(guide[:,:,0].size) / hr_img_size**2))\n",
    "                        except:\n",
    "                            pass\n",
    "                    imshow(guide.squeeze());colorbar();show()\n",
    "                    guides[cond].append(guide.copy())\n",
    "                guides[cond] = dstack(guides[cond])\n",
    "                # guides for all style featuremaps\n",
    "                fm_guides[cond] = get_fm_guides(guides[cond],caffe_model,layers=layers['style'],mode=guide_mode,th=th,k=k)\n",
    "                for layer in layers['style']: #normalise fm guides\n",
    "                    fm_guides[cond][layer] = fm_guides[cond][layer]/sqrt(diag(gram_matrix(fm_guides[cond][layer])))[:,None,None]\n",
    "\n",
    "            hr_init = preprocess(scipy.misc.imresize(output, imgs['content'].shape)/255.)  # NOTE: scipy.misc.imresize was removed in SciPy>=1.3; use skimage.transform.resize instead\n",
    "            make_torch_init(init_file_name, hr_init)\n",
    "\n",
    "            args = OrderedDict()\n",
    "            args['style'] = [\n",
    "                                [\n",
    "                                    {'targets': vstack([mean_fm_guided(act['style'+str(s)][layer],\n",
    "                                                                           fm_guides['style'+str(s)][layer])\n",
    "                                                        for s,_ in enumerate(style_img_names)]),\n",
    "                                     'weights': weights['style'][l][0], \n",
    "                                     'guides':  fm_guides['content'][layer]\n",
    "                                    },\n",
    "                                    {'targets': vstack([gram_matrix(act['style'+str(s)][layer])[None,:]\n",
    "                                                        for s,_ in enumerate(style_img_names)]).mean(0)[None,:],\n",
    "                                     'weights': weights['style'][l][1]\n",
    "                                    } \n",
    "                                ] \n",
    "                            for l,layer in enumerate(layers['style'])]\n",
    "\n",
    "            act['content'] = get_activations(imgs_torch['content'], caffe_model, layers=layers['content'], gpu=gpu)\n",
    "            args['content'] = [[{'targets': act['content'][layers['content'][0]][None,:],'weights': weights['content'][0][0]}],]                \n",
    "            args_now = args['style'] + args['content']\n",
    "\n",
    "            make_torch_input(input_file_name, layers_now, loss_functions, args_now)\n",
    "            context = {\n",
    "                'caffe_model': caffe_model,\n",
    "                'input_file': input_file_name,\n",
    "                'init_file': init_file_name,\n",
    "                'gpu': gpu,\n",
    "                'max_iter': hr_max_iter,\n",
    "                'backend': 'cudnn',\n",
    "                'print_iter': 50,\n",
    "                'save_iter': 0,\n",
    "                'layer_order': list2css(layers_now),\n",
    "                'output_file': output_file_name,\n",
    "                'project_dir': project_dir\n",
    "            }\n",
    "            template = ('#!/bin/bash\\n' +\n",
    "                        'cd {project_dir} && ' + \n",
    "                        'time /usr/local/torch/install/bin/th ImageSynthesis.lua ' + \n",
    "                        '-caffe_model {caffe_model} ' +\n",
    "                        '-input_file {input_file} ' + \n",
    "                        '-init_file {init_file} ' + \n",
    "                        '-gpu {gpu} ' + \n",
    "                        '-max_iter {max_iter} ' +\n",
    "                        '-print_iter {print_iter} ' +\n",
    "                        '-save_iter {save_iter} ' +\n",
    "                        '-backend {backend} ' + \n",
    "                        '-layer_order {layer_order} ' +\n",
    "                        '-output_file {output_file}'\n",
    "                       )\n",
    "\n",
    "            script_name = project_dir + '/run_synthesis.sh'\n",
    "            with open(script_name, 'w') as script:\n",
    "                script.write(template.format(**context))\n",
    "            os.chmod(script_name, 0o755)\n",
    "            #execute script\n",
    "            !{script_name}\n",
    "            output = deprocess(get_torch_output(output_file_name))\n",
    "            imshow(output);gcf().set_size_inches(8,14);show()\n",
    "            imsave(result_dir + result_image_name, output)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "# Guided Sums only"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## One Style image"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### org net"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "img_dirs = OrderedDict()\n",
    "img_names = OrderedDict()\n",
    "guide_dirs = OrderedDict()\n",
    "img_dirs['content'] = photo_dir\n",
    "img_dirs['style'] = art_dir\n",
    "img_names['style'] = 'fig2_style1.jpg'\n",
    "img_names['content'] = 'fig2_content.jpg'\n",
    "guide_dirs['content'] = photo_dir\n",
    "guide_dirs['style'] = art_dir\n",
    "regions = ['sky','nosky']\n",
    "layers = OrderedDict()\n",
    "layers['style'] = ['relu1_1','relu2_1','relu3_1','relu4_1','relu5_1']\n",
    "layers['content'] =  ['relu4_2']\n",
    "layers_now = layers['style'] + layers['content']\n",
    "loss_functions = [['MeanMSEGuided']] * len(layers['style']) + [['MSE']]\n",
    "\n",
    "guide_mode = 'inside'\n",
    "k = 3\n",
    "th = .5 #threshold for guides after downsampling, only relevant for modes inside and all or k != 0\n",
    "img_size = 512\n",
    "hr_img_size = 1024\n",
    "max_iter = 500\n",
    "hr_max_iter = 200\n",
    "gpu = 0\n",
    "model_name = 'org_pad'\n",
    "caffe_model = set_model(model_name, project_dir)\n",
    "\n",
    "input_file_name = tmp_dir + 'input_sc.hdf5'\n",
    "init_file_name = tmp_dir + 'init_sc.hdf5'\n",
    "output_file_name = tmp_dir + 'output_sc.hdf5'\n",
    "\n",
    "n_ch = len(regions)\n",
    "sw = 1e9\n",
    "gw = [1./n_ch]*n_ch\n",
    "cw = 1\n",
    "weights = OrderedDict()\n",
    "weights['style'] = [\n",
    "    [array([sw/(64**2)]*n_ch)*array(gw)],\n",
    "    [array([sw/(128**2)]*n_ch)*array(gw)],\n",
    "    [array([sw/(256**2)]*n_ch)*array(gw)],\n",
    "    [array([sw/(512**2)]*n_ch)*array(gw)],\n",
    "    [array([sw/(512**2)]*n_ch)*array(gw)],\n",
    "]\n",
    "weights['content'] = [[array([cw])]]\n",
    "\n",
    "result_image_name = (\n",
    "'cimg_' + img_names['content'] + \n",
    "'_simg_' + img_names['style'] + \n",
    "'_regions_' + list2css(regions) + \n",
    "'_mm_' + str(guide_mode) + \n",
    "'_k_' + str(k) + \n",
    "'_sz_' + str(img_size) + \n",
    "'_model_' + model_name + \n",
    "'_sw_' + '%.1E'%decimal.Decimal(sw) + \n",
    "'_cw_' + '%.1E'%decimal.Decimal(cw) + \n",
    "'_gw_' + list2css(['%.1E'%decimal.Decimal(i) for i in gw]) + \n",
    "# '_gw_equal_' + '%.1E'%decimal.Decimal(gw[0]) +  \n",
    "'_layers_' + list2css(layers_now) + \n",
    "'_guided_mean' + \n",
    "'.jpg'\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false,
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "#check if image exists\n",
    "if os.path.isfile(result_dir + result_image_name) == False:\n",
    "    #get images\n",
    "    conditions = img_names.keys()\n",
    "    imgs = OrderedDict()\n",
    "    imgs_torch = OrderedDict()\n",
    "    act = OrderedDict()\n",
    "    guides = OrderedDict()\n",
    "    fm_guides = OrderedDict()\n",
    "    for cond in conditions:\n",
    "        imgs[cond] = img_as_float(imread(img_dirs[cond] + img_names[cond]))\n",
    "        try:\n",
    "            imgs[cond] = transform.pyramid_reduce(imgs[cond], sqrt(float(imgs[cond][:,:,0].size) / img_size**2))\n",
    "        except:\n",
    "            print('no downsampling: ' + img_names[cond])\n",
    "        imshow(imgs[cond]);show()\n",
    "        imgs_torch[cond] = preprocess(imgs[cond])\n",
    "        act[cond] = get_activations(imgs_torch[cond],\n",
    "                                    caffe_model,\n",
    "                                    layers=layers['style'],\n",
    "                                    gpu=gpu\n",
    "                                   )\n",
    "        #get guides\n",
    "        guides[cond] = []\n",
    "        for r,region in enumerate(regions):\n",
    "            print(region)\n",
    "            if region =='all': \n",
    "                guide = ones(imgs[cond].shape[:2])[:,:,None]\n",
    "            else:\n",
    "                guide = img_as_float(imread(guide_dirs[cond] + os.path.splitext(img_names[cond])[0]+'_'+region+'.jpg'))\n",
    "                if guide.ndim==2:\n",
    "                    guide = guide[:,:,None]\n",
    "                else:\n",
    "                    guide = guide[:,:,:1]\n",
    "                try:\n",
    "                    guide = transform.pyramid_reduce(guide, sqrt(float(guide[:,:,0].size) / img_size**2))\n",
    "                except:\n",
    "                    pass\n",
    "            imshow(guide.squeeze());colorbar();show()\n",
    "            guides[cond].append(guide.copy())\n",
    "        guides[cond] = dstack(guides[cond])\n",
    "        # guides for all style featuremaps\n",
    "        fm_guides[cond] = get_fm_guides(guides[cond],caffe_model,layers=layers['style'],mode=guide_mode,th=th,k=k)\n",
    "        for layer in layers['style']: #normalise fm guides\n",
    "            fm_guides[cond][layer] = fm_guides[cond][layer]/sqrt(diag(gram_matrix(fm_guides[cond][layer])))[:,None,None]\n",
    "\n",
    "    make_torch_init(init_file_name, imgs_torch['content'])\n",
    "\n",
    "    args = OrderedDict()\n",
    "    args['style'] = [\n",
    "                        [\n",
    "                            {'targets': mean_fm_guided(act['style'][layer], fm_guides['style'][layer]),\n",
    "                             'weights': weights['style'][l][0], \n",
    "                             'guides':  fm_guides['content'][layer]\n",
    "                            }\n",
    "                        ] \n",
    "                    for l,layer in enumerate(layers['style'])]\n",
    "\n",
    "    act['content'] = get_activations(imgs_torch['content'], caffe_model, layers=layers['content'], gpu=gpu)\n",
    "    args['content'] = [[{'targets': act['content'][layers['content'][0]][None,:],'weights': weights['content'][0][0]}],]                \n",
    "    args_now = args['style'] + args['content']\n",
    "\n",
    "    make_torch_input(input_file_name, layers_now, loss_functions, args_now)\n",
    "    context = {\n",
    "        'caffe_model': caffe_model,\n",
    "        'input_file': input_file_name,\n",
    "        'init_file': init_file_name,\n",
    "        'gpu': gpu,\n",
    "        'max_iter': max_iter,\n",
    "        'backend': 'cudnn',\n",
    "        'print_iter': 50,\n",
    "        'save_iter': 0,\n",
    "        'layer_order': list2css(layers_now),\n",
    "        'output_file': output_file_name,\n",
    "        'project_dir': project_dir\n",
    "    }\n",
    "    template = ('#!/bin/bash\\n' +\n",
    "                'cd {project_dir} && ' + \n",
    "                'time /usr/local/torch/install/bin/th ImageSynthesis.lua ' + \n",
    "                '-caffe_model {caffe_model} ' +\n",
    "                '-input_file {input_file} ' + \n",
    "                '-init_file {init_file} ' + \n",
    "                '-gpu {gpu} ' + \n",
    "                '-max_iter {max_iter} ' +\n",
    "                '-print_iter {print_iter} ' +\n",
    "                '-save_iter {save_iter} ' +\n",
    "                '-backend {backend} ' + \n",
    "                '-layer_order {layer_order} ' +\n",
    "                '-output_file {output_file}'\n",
    "               )\n",
    "\n",
    "    script_name = project_dir + '/run_synthesis.sh'\n",
    "    with open(script_name, 'w') as script:\n",
    "        script.write(template.format(**context))\n",
    "    os.chmod(script_name, 0o755)\n",
    "    #execute script\n",
    "    !{script_name}\n",
    "    output = deprocess(get_torch_output(output_file_name))\n",
    "    imshow(output);gcf().set_size_inches(8,14);show()\n",
    "    imsave(result_dir + result_image_name, output)\n",
    "\n",
    "#make it highres\n",
    "if hr_img_size:\n",
    "    result_image_name = (\n",
    "    'cimg_' + img_names['content'] + \n",
    "    '_simg_' + img_names['style'] + \n",
    "    '_regions_' + list2css(regions) + \n",
    "    '_mm_' + str(guide_mode) + \n",
    "    '_k_' + str(k) + \n",
    "    '_sz_' + str(img_size) + \n",
    "    '_hrsz_' + str(hr_img_size) + \n",
    "    '_model_' + model_name + \n",
    "    '_sw_' + '%.1E'%decimal.Decimal(sw) + \n",
    "    '_cw_' + '%.1E'%decimal.Decimal(cw) + \n",
    "    '_gw_' + list2css(['%.1E'%decimal.Decimal(i) for i in gw]) + \n",
    "    # '_gw_equal_' + '%.1E'%decimal.Decimal(gw[0]) +  \n",
    "    '_layers_' + list2css(layers_now) + \n",
    "    '_guided_mean' + \n",
    "    '.jpg'\n",
    "    )\n",
    "    #check if image exists\n",
    "    if os.path.isfile(result_dir + result_image_name) == False:\n",
    "        conditions = img_names.keys()\n",
    "        imgs = OrderedDict()\n",
    "        imgs_torch = OrderedDict()\n",
    "        act = OrderedDict()\n",
    "        guides = OrderedDict()\n",
    "        fm_guides = OrderedDict()\n",
    "        for cond in conditions:\n",
    "            imgs[cond] = img_as_float(imread(img_dirs[cond] + img_names[cond]))\n",
    "            try:\n",
    "                imgs[cond] = transform.pyramid_reduce(imgs[cond], sqrt(float(imgs[cond][:,:,0].size) / hr_img_size**2))\n",
    "            except:\n",
    "                print('no downsampling: ' + img_names[cond])\n",
    "            imshow(imgs[cond]);show()\n",
    "            imgs_torch[cond] = preprocess(imgs[cond])\n",
    "            act[cond] = get_activations(imgs_torch[cond],\n",
    "                                        caffe_model,\n",
    "                                        layers=layers['style'],\n",
    "                                        gpu=gpu\n",
    "                                       )\n",
    "            #get guides\n",
    "            guides[cond] = []\n",
    "            for r,region in enumerate(regions):\n",
    "                print(region)\n",
    "                if region =='all': \n",
    "                    guide = ones(imgs[cond].shape[:2])[:,:,None]\n",
    "                else:\n",
    "                    guide = img_as_float(imread(guide_dirs[cond] + os.path.splitext(img_names[cond])[0]+'_'+region+'.jpg'))\n",
    "                    if guide.ndim==2:\n",
    "                        guide = guide[:,:,None]\n",
    "                    else:\n",
    "                        guide = guide[:,:,:1]\n",
    "                    try:\n",
    "                        guide = transform.pyramid_reduce(guide, sqrt(float(guide[:,:,0].size) / hr_img_size**2))\n",
    "                    except:\n",
    "                        pass\n",
    "                imshow(guide.squeeze());colorbar();show()\n",
    "                guides[cond].append(guide.copy())\n",
    "            guides[cond] = dstack(guides[cond])\n",
    "            # guides for all style featuremaps\n",
    "            fm_guides[cond] = get_fm_guides(guides[cond],caffe_model,layers=layers['style'],mode=guide_mode,th=th,k=k)\n",
    "            for layer in layers['style']: #normalise fm guides\n",
    "                fm_guides[cond][layer] = fm_guides[cond][layer]/sqrt(diag(gram_matrix(fm_guides[cond][layer])))[:,None,None]\n",
    "\n",
    "        hr_init = preprocess(scipy.misc.imresize(output, imgs['content'].shape)/255.)  # NOTE: scipy.misc.imresize was removed in SciPy>=1.3; use skimage.transform.resize instead\n",
    "        make_torch_init(init_file_name, hr_init)\n",
    "\n",
    "        args = OrderedDict()\n",
    "        args['style'] = [\n",
    "                            [\n",
    "                                {'targets': mean_fm_guided(act['style'][layer], fm_guides['style'][layer]),\n",
    "                                 'weights': weights['style'][l][0], \n",
    "                                 'guides':  fm_guides['content'][layer]\n",
    "                                }\n",
    "                            ] \n",
    "                        for l,layer in enumerate(layers['style'])]\n",
    "\n",
    "        act['content'] = get_activations(imgs_torch['content'], caffe_model, layers=layers['content'], gpu=gpu)\n",
    "        args['content'] = [[{'targets': act['content'][layers['content'][0]][None,:],'weights': weights['content'][0][0]}],]                \n",
    "        args_now = args['style'] + args['content']\n",
    "\n",
    "        make_torch_input(input_file_name, layers_now, loss_functions, args_now)\n",
    "        context = {\n",
    "            'caffe_model': caffe_model,\n",
    "            'input_file': input_file_name,\n",
    "            'init_file': init_file_name,\n",
    "            'gpu': gpu,\n",
    "            'max_iter': hr_max_iter,\n",
    "            'backend': 'cudnn',\n",
    "            'print_iter': 50,\n",
    "            'save_iter': 0,\n",
    "            'layer_order': list2css(layers_now),\n",
    "            'output_file': output_file_name,\n",
    "            'project_dir': project_dir\n",
    "        }\n",
    "        template = ('#!/bin/bash\\n' +\n",
    "                    'cd {project_dir} && ' + \n",
    "                    'time /usr/local/torch/install/bin/th ImageSynthesis.lua ' + \n",
    "                    '-caffe_model {caffe_model} ' +\n",
    "                    '-input_file {input_file} ' + \n",
    "                    '-init_file {init_file} ' + \n",
    "                    '-gpu {gpu} ' + \n",
    "                    '-max_iter {max_iter} ' +\n",
    "                    '-print_iter {print_iter} ' +\n",
    "                    '-save_iter {save_iter} ' +\n",
    "                    '-backend {backend} ' + \n",
    "                    '-layer_order {layer_order} ' +\n",
    "                    '-output_file {output_file}'\n",
    "                   )\n",
    "\n",
    "        script_name = project_dir + '/run_synthesis.sh'\n",
    "        with open(script_name, 'w') as script:\n",
    "            script.write(template.format(**context))\n",
    "        os.chmod(script_name, 0o755)\n",
    "        #execute script\n",
    "        !{script_name}\n",
    "        output = deprocess(get_torch_output(output_file_name))\n",
    "        imshow(output);gcf().set_size_inches(8,14);show()\n",
    "        imsave(result_dir + result_image_name, output)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Multiple Style Images"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### org net"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "style_img_names = ['fig2_style1.jpg','fig2_style2.jpg']\n",
    "style_regions = [['nosky'],['sky']]\n",
    "img_dirs = OrderedDict()\n",
    "img_names = OrderedDict()\n",
    "guide_names = OrderedDict()\n",
    "guide_dirs = OrderedDict()\n",
    "img_dirs['content'] = photo_dir\n",
    "img_names['content'] = 'fig2_content.jpg'\n",
    "guide_dirs['content'] = photo_dir\n",
    "guide_names['content'] = ['nosky', 'sky']\n",
    "for n, name in enumerate(style_img_names):\n",
    "    img_names['style'+str(n)] = name\n",
    "    img_dirs['style'+str(n)] = art_dir\n",
    "    guide_dirs['style'+str(n)] = art_dir\n",
    "    guide_names['style'+str(n)] = style_regions[n]\n",
    "for r, region in enumerate(style_regions):  # NOTE: redundant -- guide_names['style'+str(n)] is already assigned in the loop above\n",
    "    guide_names['style'+str(r)] = region\n",
    "assert(len(style_regions) == len(guide_names['content']))\n",
    "layers = OrderedDict()\n",
    "layers['style'] = ['relu1_1','relu2_1','relu3_1','relu4_1','relu5_1']\n",
    "layers['content'] =  ['relu4_2']\n",
    "layers_now = layers['style'] + layers['content']\n",
    "loss_functions = [['MeanMSEGuided']] * len(layers['style']) + [['MSE']]\n",
    "\n",
    "guide_mode = 'simple'\n",
    "k = 0\n",
    "th = .5 #threshold for guides after downsampling\n",
    "img_size = 512\n",
    "hr_img_size = 1024\n",
    "max_iter = 500\n",
    "hr_max_iter = 200\n",
    "gpu = 0\n",
    "model_name = 'org_pad'\n",
    "caffe_model = set_model(model_name, project_dir)\n",
    "\n",
    "input_file_name = tmp_dir + 'input_sc.hdf5'\n",
    "init_file_name = tmp_dir + 'init_sc.hdf5'\n",
    "output_file_name = tmp_dir + 'output_sc.hdf5'\n",
    "n_ch = len(guide_names['content'])\n",
    "sw = 1e8\n",
    "gw = [1./n_ch]*n_ch\n",
    "# gw = [1./100,1./100,98./100]\n",
    "cw = 1\n",
    "weights = OrderedDict()\n",
    "weights['style'] = [\n",
    "    [array([sw/(64**2)]*n_ch)*array(gw)],\n",
    "    [array([sw/(128**2)]*n_ch)*array(gw)],\n",
    "    [array([sw/(256**2)]*n_ch)*array(gw)],\n",
    "    [array([sw/(512**2)]*n_ch)*array(gw)],\n",
    "    [array([sw/(512**2)]*n_ch)*array(gw)],\n",
    "]\n",
    "weights['content'] = [[array([cw])]]\n",
    "\n",
    "result_image_name = (\n",
    "'cimg_' + img_names['content'] + \n",
    "'_simg_' + list2css(style_img_names) + \n",
    "'_creg_' + list2css(guide_names['content']) +\n",
    "'_sreg_' + list2css([list2css(region) for region in style_regions]) + \n",
    "'_mm_' + str(guide_mode) + \n",
    "'_k_' + str(k) + \n",
    "'_sz_' + str(img_size) + \n",
    "'_model_' + model_name + \n",
    "'_sw_' + '%.1E'%decimal.Decimal(sw) + \n",
    "'_cw_' + '%.1E'%decimal.Decimal(cw) + \n",
    "# '_gw_' + list2css(['%.1E'%decimal.Decimal(i) for i in gw]) + \n",
    "'_gw_equal_' + '%.1E'%decimal.Decimal(gw[0]) +  \n",
    "'_layers_usual'+# + list2css(layers_now) + \n",
    "'_guided_mean' + \n",
    "'.jpg'\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "if os.path.isfile(result_dir + result_image_name) == False:\n",
    "    #get images\n",
    "    conditions = img_names.keys()\n",
    "    imgs = OrderedDict()\n",
    "    imgs_torch = OrderedDict()\n",
    "    act = OrderedDict()\n",
    "    guides = OrderedDict()\n",
    "    fm_guides = OrderedDict()\n",
    "    for cond in conditions:\n",
    "        imgs[cond] = img_as_float(imread(img_dirs[cond] + img_names[cond]))\n",
    "        try:\n",
    "            imgs[cond] = transform.pyramid_reduce(imgs[cond], sqrt(float(imgs[cond][:,:,0].size) / img_size**2))\n",
    "        except:\n",
    "            print('no downsampling: ' + img_names[cond])\n",
    "        imshow(imgs[cond]);show()\n",
    "        imgs_torch[cond] = preprocess(imgs[cond])\n",
    "        act[cond] = get_activations(imgs_torch[cond],\n",
    "                                    caffe_model,\n",
    "                                    layers=layers['style'],\n",
    "                                    gpu=gpu\n",
    "                                   )\n",
    "        #get guides\n",
    "        guides[cond] = []\n",
    "        for r,region in enumerate(guide_names[cond]):\n",
    "            print(region)\n",
    "            if region =='all': \n",
    "                guide = ones(imgs[cond].shape[:2])[:,:,None]\n",
    "            else:\n",
    "                guide = img_as_float(imread(guide_dirs[cond] + os.path.splitext(img_names[cond])[0]+'_'+region+'.jpg'))\n",
    "                if guide.ndim==2:\n",
    "                    guide = guide[:,:,None]\n",
    "                else:\n",
    "                    guide = guide[:,:,:1]\n",
    "                # Best-effort downsample of the guide to roughly img_size**2 pixels.\n",
    "                # NOTE(review): the bare except silently keeps the original\n",
    "                # resolution on *any* failure, not just a rejected scale factor.\n",
    "                try:\n",
    "                    guide = transform.pyramid_reduce(guide, sqrt(float(guide[:,:,0].size) / img_size**2))\n",
    "                except:\n",
    "                    pass\n",
    "            imshow(guide.squeeze());colorbar();show()\n",
    "            guides[cond].append(guide.copy())\n",
    "        # stack the per-region guides along the third axis\n",
    "        guides[cond] = dstack(guides[cond])\n",
    "        # guides for all style featuremaps\n",
    "        fm_guides[cond] = get_fm_guides(guides[cond],caffe_model,layers=layers['style'],mode=guide_mode,th=th,k=k)\n",
    "        for layer in layers['style']: #normalise fm guides\n",
    "            # scale each guide channel by sqrt(diag(Gram)) -- presumably its L2 norm; verify against gram_matrix\n",
    "            fm_guides[cond][layer] = fm_guides[cond][layer]/sqrt(diag(gram_matrix(fm_guides[cond][layer])))[:,None,None]\n",
    "\n",
    "    # initialise the optimisation from the preprocessed content image\n",
    "    make_torch_init(init_file_name, imgs_torch['content'])\n",
    "\n",
    "    # per-layer style-loss arguments: guided targets stacked over all style\n",
    "    # images, masked by the content image's feature-map guides\n",
    "    args = OrderedDict()\n",
    "    args['style'] = [\n",
    "                        [\n",
    "                            {'targets': vstack([mean_fm_guided(act['style'+str(s)][layer],\n",
    "                                                                   fm_guides['style'+str(s)][layer])\n",
    "                                                for s,_ in enumerate(style_img_names)]),\n",
    "                             'weights': weights['style'][l][0], \n",
    "                             'guides':  fm_guides['content'][layer]\n",
    "                            }\n",
    "                        ] \n",
    "                    for l,layer in enumerate(layers['style'])]\n",
    "\n",
    "    # content-loss target: activations of the (single) content layer\n",
    "    act['content'] = get_activations(imgs_torch['content'], caffe_model, layers=layers['content'], gpu=gpu)\n",
    "    args['content'] = [[{'targets': act['content'][layers['content'][0]][None,:],'weights': weights['content'][0][0]}],]                \n",
    "    args_now = args['style'] + args['content']\n",
    "\n",
    "    make_torch_input(input_file_name, layers_now, loss_functions, args_now)\n",
    "    # parameters for the Torch synthesis call below\n",
    "    context = {\n",
    "        'caffe_model': caffe_model,\n",
    "        'input_file': input_file_name,\n",
    "        'init_file': init_file_name,\n",
    "        'gpu': gpu,\n",
    "        'max_iter': max_iter,\n",
    "        'backend': 'cudnn',\n",
    "        'print_iter': 50,\n",
    "        'save_iter': 0,\n",
    "        'layer_order': list2css(layers_now),\n",
    "        'output_file': output_file_name,\n",
    "        'project_dir': project_dir\n",
    "    }\n",
    "    # bash wrapper that runs ImageSynthesis.lua with the settings above\n",
    "    template = ('#!/bin/bash\\n' +\n",
    "                'cd {project_dir} && ' + \n",
    "                'time /usr/local/torch/install/bin/th ImageSynthesis.lua ' + \n",
    "                '-caffe_model {caffe_model} ' +\n",
    "                '-input_file {input_file} ' + \n",
    "                '-init_file {init_file} ' + \n",
    "                '-gpu {gpu} ' + \n",
    "                '-max_iter {max_iter} ' +\n",
    "                '-print_iter {print_iter} ' +\n",
    "                '-save_iter {save_iter} ' +\n",
    "                '-backend {backend} ' + \n",
    "                '-layer_order {layer_order} ' +\n",
    "                '-output_file {output_file}'\n",
    "               )\n",
    "\n",
    "    # write the script, mark it executable, then run it via the ! shell magic\n",
    "    script_name = project_dir + '/run_synthesis.sh'\n",
    "    with open(script_name, 'w') as script:\n",
    "        script.write(template.format(**context))\n",
    "    os.chmod(script_name, 0o755)\n",
    "    #execute script\n",
    "    !{script_name}\n",
    "    # read back the optimisation result, display and save the low-res image\n",
    "    output = deprocess(get_torch_output(output_file_name))\n",
    "    imshow(output);gcf().set_size_inches(8,14);show()\n",
    "    imsave(result_dir + result_image_name, output)\n",
    "    # Optional high-resolution pass: rebuild guides and loss targets at\n",
    "    # hr_img_size and re-run the synthesis, initialised from the upsampled\n",
    "    # low-res result.\n",
    "    # NOTE(review): near-verbatim copy of the low-res pipeline above --\n",
    "    # a strong candidate for extraction into a shared function.\n",
    "    if hr_img_size:\n",
    "        #make highres\n",
    "        # result filename encodes all relevant parameters of this run\n",
    "        result_image_name = (\n",
    "        'cimg_' + img_names['content'] + \n",
    "        '_simg_' + list2css(style_img_names) + \n",
    "        '_creg_' + list2css(guide_names['content']) +\n",
    "        '_sreg_' + list2css([list2css(region) for region in style_regions]) + \n",
    "        '_mm_' + str(guide_mode) + \n",
    "        '_k_' + str(k) + \n",
    "        '_sz_' + str(img_size) + \n",
    "        '_hrsz_' + str(hr_img_size) + \n",
    "        '_model_' + model_name + \n",
    "        '_sw_' + '%.1E'%decimal.Decimal(sw) + \n",
    "        '_cw_' + '%.1E'%decimal.Decimal(cw) + \n",
    "        # '_gw_' + list2css(['%.1E'%decimal.Decimal(i) for i in gw]) + \n",
    "        '_gw_equal_' + '%.1E'%decimal.Decimal(gw[0]) +  \n",
    "        '_layers_usual'+# + list2css(layers_now) + \n",
    "        '_guided_mean' + \n",
    "        '.jpg'\n",
    "        )\n",
    "        # skip the expensive re-run if the high-res result already exists\n",
    "        if os.path.isfile(result_dir + result_image_name) == False:\n",
    "            #get images\n",
    "            conditions = img_names.keys()\n",
    "            imgs = OrderedDict()\n",
    "            imgs_torch = OrderedDict()\n",
    "            act = OrderedDict()\n",
    "            guides = OrderedDict()\n",
    "            fm_guides = OrderedDict()\n",
    "            for cond in conditions:\n",
    "                imgs[cond] = img_as_float(imread(img_dirs[cond] + img_names[cond]))\n",
    "                # best-effort downsample to ~hr_img_size**2 pixels\n",
    "                try:\n",
    "                    imgs[cond] = transform.pyramid_reduce(imgs[cond], sqrt(float(imgs[cond][:,:,0].size) / hr_img_size**2))\n",
    "                except:\n",
    "                    print('no downsampling: ' + img_names[cond])\n",
    "                imshow(imgs[cond]);show()\n",
    "                imgs_torch[cond] = preprocess(imgs[cond])\n",
    "                act[cond] = get_activations(imgs_torch[cond],\n",
    "                                            caffe_model,\n",
    "                                            layers=layers['style'],\n",
    "                                            gpu=gpu\n",
    "                                           )\n",
    "                #get guides\n",
    "                guides[cond] = []\n",
    "                for r,region in enumerate(guide_names[cond]):\n",
    "                    # Python 2 print statement (kernel is python2)\n",
    "                    print region\n",
    "                    # 'all' means an all-ones mask covering the full image\n",
    "                    if region =='all': \n",
    "                        guide = ones(imgs[cond].shape[:2])[:,:,None]\n",
    "                    else:\n",
    "                        guide = img_as_float(imread(guide_dirs[cond] + img_names[cond].rstrip('.jpg')+'_'+region+'.jpg'))\n",
    "                        # keep only a single channel, as (H, W, 1)\n",
    "                        if guide.ndim==2:\n",
    "                            guide = guide[:,:,None]\n",
    "                        else:\n",
    "                            guide = guide[:,:,:1]\n",
    "                        # NOTE(review): bare except silently keeps the original size on failure\n",
    "                        try:\n",
    "                            guide = transform.pyramid_reduce(guide, sqrt(float(guide[:,:,0].size) / hr_img_size**2))\n",
    "                        except:\n",
    "                            pass\n",
    "                    imshow(guide.squeeze());colorbar();show()\n",
    "                    guides[cond].append(guide.copy())\n",
    "                guides[cond] = dstack(guides[cond])\n",
    "                # guides for all style featuremaps\n",
    "                fm_guides[cond] = get_fm_guides(guides[cond],caffe_model,layers=layers['style'],mode=guide_mode,th=th,k=k)\n",
    "                for layer in layers['style']: #normalise fm guides\n",
    "                    fm_guides[cond][layer] = fm_guides[cond][layer]/sqrt(diag(gram_matrix(fm_guides[cond][layer])))[:,None,None]\n",
    "\n",
    "            # upsample the low-res output to the hr content size as the new init\n",
    "            # NOTE(review): scipy.misc.imresize is deprecated and removed in\n",
    "            # SciPy >= 1.3; skimage.transform.resize is the modern replacement\n",
    "            hr_init = preprocess(scipy.misc.imresize(output, imgs['content'].shape)/255.)\n",
    "            make_torch_init(init_file_name, hr_init)\n",
    "\n",
    "            # per-layer style-loss arguments (same construction as the low-res pass)\n",
    "            args = OrderedDict()\n",
    "            args['style'] = [\n",
    "                                [\n",
    "                                    {'targets': vstack([mean_fm_guided(act['style'+str(s)][layer],\n",
    "                                                                           fm_guides['style'+str(s)][layer])\n",
    "                                                        for s,_ in enumerate(style_img_names)]),\n",
    "                                     'weights': weights['style'][l][0], \n",
    "                                     'guides':  fm_guides['content'][layer]\n",
    "                                    }\n",
    "                                ] \n",
    "                            for l,layer in enumerate(layers['style'])]\n",
    "\n",
    "            act['content'] = get_activations(imgs_torch['content'], caffe_model, layers=layers['content'], gpu=gpu)\n",
    "            args['content'] = [[{'targets': act['content'][layers['content'][0]][None,:],'weights': weights['content'][0][0]}],]                \n",
    "            args_now = args['style'] + args['content']\n",
    "\n",
    "            make_torch_input(input_file_name, layers_now, loss_functions, args_now)\n",
    "            # parameters for the high-res Torch synthesis call (hr_max_iter here)\n",
    "            context = {\n",
    "                'caffe_model': caffe_model,\n",
    "                'input_file': input_file_name,\n",
    "                'init_file': init_file_name,\n",
    "                'gpu': gpu,\n",
    "                'max_iter': hr_max_iter,\n",
    "                'backend': 'cudnn',\n",
    "                'print_iter': 50,\n",
    "                'save_iter': 0,\n",
    "                'layer_order': list2css(layers_now),\n",
    "                'output_file': output_file_name,\n",
    "                'project_dir': project_dir\n",
    "            }\n",
    "            # bash wrapper that runs ImageSynthesis.lua with the settings above\n",
    "            template = ('#!/bin/bash\\n' +\n",
    "                        'cd {project_dir} && ' + \n",
    "                        'time /usr/local/torch/install/bin/th ImageSynthesis.lua ' + \n",
    "                        '-caffe_model {caffe_model} ' +\n",
    "                        '-input_file {input_file} ' + \n",
    "                        '-init_file {init_file} ' + \n",
    "                        '-gpu {gpu} ' + \n",
    "                        '-max_iter {max_iter} ' +\n",
    "                        '-print_iter {print_iter} ' +\n",
    "                        '-save_iter {save_iter} ' +\n",
    "                        '-backend {backend} ' + \n",
    "                        '-layer_order {layer_order} ' +\n",
    "                        '-output_file {output_file}'\n",
    "                       )\n",
    "\n",
    "            # write the script, mark it executable, then run it via the ! shell magic\n",
    "            script_name = project_dir + '/run_synthesis.sh'\n",
    "            with open(script_name, 'w') as script:\n",
    "                script.write(template.format(**context))\n",
    "            os.chmod(script_name, 0o755)\n",
    "            #execute script\n",
    "            !{script_name}\n",
    "\n",
    "            # read back and save the high-res result\n",
    "            output = deprocess(get_torch_output(output_file_name))\n",
    "            imshow(output);gcf().set_size_inches(8,14);show()\n",
    "            imsave(result_dir + result_image_name, output)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
