{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# User study experiments"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# for CPU only\n",
    "\n",
    "# import os\n",
    "# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2023-11-02 16:54:38.978815: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading BERT model... "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at distilbert-base-uncased were not used when initializing DistilBertModel: ['vocab_projector.weight', 'vocab_layer_norm.weight', 'vocab_transform.weight', 'vocab_layer_norm.bias', 'vocab_projector.bias', 'vocab_transform.bias']\n",
      "- This IS expected if you are initializing DistilBertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing DistilBertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "done\n",
      "loading CLIP model... done\n",
      "loading precomputed CLIP text embbedings... done\n",
      "loading precomputed CLIP img embbedings... done\n",
      "DEVICE:  cpu\n"
     ]
    }
   ],
   "source": [
    "%load_ext autoreload\n",
    "%autoreload 2\n",
    "import numpy as np\n",
    "from src.motion_refiner_4D import Motion_refiner, MAX_NUM_OBJS\n",
    "from src.config import *\n",
    "base_folder = \"./\"\n",
    "# base_folder = \"/home/arthur/local_data/\" \n",
    "\n",
    "traj_n = 40\n",
    "mr = Motion_refiner(load_models=True ,traj_n = traj_n, locality_factor=True, clip_only=False,load_precomp_emb=True)\n",
    "feature_indices, obj_sim_indices, obj_poses_indices, traj_indices = mr.get_indices()\n",
    "embedding_indices = np.concatenate([feature_indices,obj_sim_indices, obj_poses_indices])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading dataset:  latte_100k_lf ...done\n",
      "raw X: (100000, 953) \tY: (100000, 160)\n",
      "filtered X: (96718, 953) \tY: (96718, 160)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2023-11-02 16:55:00.059597: I tensorflow/compiler/jit/xla_cpu_device.cc:41] Not creating XLA devices, tf_xla_enable_xla_devices not set\n",
      "2023-11-02 16:55:00.059695: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1\n",
      "2023-11-02 16:55:01.562440: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected\n",
      "2023-11-02 16:55:01.562475: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: arthur-XPS-15-9530\n",
      "2023-11-02 16:55:01.562479: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: arthur-XPS-15-9530\n",
      "2023-11-02 16:55:01.562532: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 535.129.3\n",
      "2023-11-02 16:55:01.562549: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 535.129.3\n",
      "2023-11-02 16:55:01.562552: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 535.129.3\n",
      "2023-11-02 16:55:01.563431: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  SSE4.1 SSE4.2 AVX AVX2 FMA\n",
      "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
      "2023-11-02 16:55:01.564465: I tensorflow/compiler/jit/xla_gpu_device.cc:99] Not creating XLA devices, tf_xla_enable_xla_devices not set\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train X: (67702, 953) \tY: (67702, 160)\n",
      "Test  X: (19344, 953) \tY: (19344, 160)\n",
      "Val   X: (9672, 953) \tY: (9672, 160)\n"
     ]
    }
   ],
   "source": [
    "dataset_name = \"latte_100k_lf\"\n",
    "\n",
    "X,Y, data = mr.load_dataset(dataset_name, filter_data = True, base_path=base_folder+\"data/\")\n",
    "X_train, X_test, X_valid, y_train, y_test, y_valid, indices_train, indices_test, indices_val = mr.split_dataset(X, Y, test_size=0.2, val_size=0.1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### load datasets with predicted data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "ename": "FileNotFoundError",
     "evalue": "[Errno 2] No such file or directory: '/home/arthur/local_data/data/testpred_100k_latte_f.json'",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mFileNotFoundError\u001b[0m                         Traceback (most recent call last)",
      "\u001b[1;32m/home/arthur/LaTTe-Language-Trajectory-TransformEr/user_study.ipynb Cell 5\u001b[0m line \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> <a href='vscode-notebook-cell:/home/arthur/LaTTe-Language-Trajectory-TransformEr/user_study.ipynb#W4sZmlsZQ%3D%3D?line=0'>1</a>\u001b[0m data_pred \u001b[39m=\u001b[39m mr\u001b[39m.\u001b[39;49mload_data(\u001b[39m\"\u001b[39;49m\u001b[39mtestpred_100k_latte_f\u001b[39;49m\u001b[39m\"\u001b[39;49m, base_path\u001b[39m=\u001b[39;49mdata_folder)\n\u001b[1;32m      <a href='vscode-notebook-cell:/home/arthur/LaTTe-Language-Trajectory-TransformEr/user_study.ipynb#W4sZmlsZQ%3D%3D?line=1'>2</a>\u001b[0m data_no_language \u001b[39m=\u001b[39m mr\u001b[39m.\u001b[39mload_data(\u001b[39m\"\u001b[39m\u001b[39mtest_no_language_100k_latte_f\u001b[39m\u001b[39m\"\u001b[39m, base_path\u001b[39m=\u001b[39mdata_folder)\n\u001b[1;32m      <a href='vscode-notebook-cell:/home/arthur/LaTTe-Language-Trajectory-TransformEr/user_study.ipynb#W4sZmlsZQ%3D%3D?line=2'>3</a>\u001b[0m data_2d \u001b[39m=\u001b[39m mr\u001b[39m.\u001b[39mload_data(\u001b[39m\"\u001b[39m\u001b[39mpred2D_100k_latte_f\u001b[39m\u001b[39m\"\u001b[39m, base_path\u001b[39m=\u001b[39mdata_folder)\n",
      "File \u001b[0;32m~/LaTTe-Language-Trajectory-TransformEr/src/motion_refiner_4D.py:403\u001b[0m, in \u001b[0;36mMotion_refiner.load_data\u001b[0;34m(self, data_name, base_path)\u001b[0m\n\u001b[1;32m    401\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mload_data\u001b[39m(\u001b[39mself\u001b[39m, data_name\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mdata\u001b[39m\u001b[39m\"\u001b[39m, base_path\u001b[39m=\u001b[39mbase_path):\n\u001b[1;32m    402\u001b[0m     data_dict \u001b[39m=\u001b[39m {}\n\u001b[0;32m--> 403\u001b[0m     \u001b[39mwith\u001b[39;00m \u001b[39mopen\u001b[39;49m(base_path\u001b[39m+\u001b[39;49mdata_name\u001b[39m+\u001b[39;49m\u001b[39m\"\u001b[39;49m\u001b[39m.json\u001b[39;49m\u001b[39m\"\u001b[39;49m) \u001b[39mas\u001b[39;00m f:\n\u001b[1;32m    404\u001b[0m         data_dict \u001b[39m=\u001b[39m json\u001b[39m.\u001b[39mload(f)\n\u001b[1;32m    405\u001b[0m     data \u001b[39m=\u001b[39m \u001b[39mlist\u001b[39m(data_dict\u001b[39m.\u001b[39mvalues())\n",
      "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: '/home/arthur/local_data/data/testpred_100k_latte_f.json'"
     ]
    }
   ],
   "source": [
    "data_pred = mr.load_data(\"testpred_100k_latte_f\", base_path=data_folder)\n",
    "data_no_language = mr.load_data(\"test_no_language_100k_latte_f\", base_path=data_folder)\n",
    "data_2d = mr.load_data(\"pred2D_100k_latte_f\", base_path=data_folder)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 2D model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'num_layers_enc': 2, 'num_layers_dec': 4, 'd_model': 256, 'dff': 512, 'num_heads': 8, 'dropout_rate': 0.1, 'wp_d': 2, 'bs': 64, 'dense_n': 512, 'num_dense': 3, 'concat_emb': True, 'features_n': 777, 'optimizer': 'RMSprop', 'norm_layer': True, 'activation': 'tanh'}\n",
      "loading weights:  ./models/refined_refined_TF&num_layers_enc:2&num_layers_dec:4&d_model:256&dff:512&num_heads:8&dropout_rate:0.1&wp_d:2&bs:64&dense_n:512&num_dense:3&concat_emb:True&features_n:777&optimizer:RMSprop&norm_layer:True&activation:tanh.h5\n"
     ]
    }
   ],
   "source": [
    "from src.simple_TF_continuos import *\n",
    "\n",
    "model_path = \"./models/\"\n",
    "model_name = \"refined_refined_TF&num_layers_enc:2&num_layers_dec:4&d_model:256&dff:512&num_heads:8&dropout_rate:0.1&wp_d:2&bs:64&dense_n:512&num_dense:3&concat_emb:True&features_n:777&optimizer:RMSprop&norm_layer:True&activation:tanh.h5\"\n",
    "\n",
    "model_file = model_path+model_name\n",
    "model_2d = load_model(model_file)\n",
    "compile(model_2d)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from src.functions import *\n",
    "from src.traj_utils import *\n",
    "from src.motion_refiner import Motion_refiner as Motion_refiner2D\n",
    "\n",
    "mr2d = Motion_refiner2D(load_models=True)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1900\n"
     ]
    }
   ],
   "source": [
    "def apply_interaction(mr, traj, obj_poses_, text, obj_names, n_obj=3):\n",
    "    mod = 1\n",
    "    p1 = np.array([0.1, 0.1])-0.5\n",
    "    p2 = np.array([0.9, 0.9])-0.5\n",
    "    traj_np = traj\n",
    "    if isinstance(traj, list):\n",
    "        traj_np = np.array(traj)\n",
    "    obj_poses = obj_poses_\n",
    "    if isinstance(obj_poses_, list):\n",
    "        obj_poses = np.array(obj_poses_)\n",
    "    traj_raw = traj_np[::mod, :2]\n",
    "    t0 = traj_raw[0, :]\n",
    "    tf = traj_raw[-1, :]\n",
    "    traj_raw_n, obj_poses_new = interpolate_2points(\n",
    "        traj_raw, p1, p2, objs=obj_poses.T)\n",
    "\n",
    "    d = np2data(traj_raw_n, obj_names,\n",
    "                obj_poses_new.T, text, output_traj=None)\n",
    "    # X, _ = mr.prepare_data(d, label=False)\n",
    "    pred, traj_in = mr.apply_interaction(\n",
    "        model_2d, d[0], text,  label=False)\n",
    "    # print(pred)\n",
    "    # print(pred.shape)\n",
    "    # print(t0, tf)\n",
    "\n",
    "    new_traj_simple = interpolate_2points(pred[0, :, :], t0, tf)\n",
    "\n",
    "    new_traj_wp = fit_wps_to_traj(new_traj_simple, traj_raw)\n",
    "\n",
    "    constraints = np.ones([traj_raw.shape[0]])*0.15\n",
    "\n",
    "    new_traj_wp_cnt = mr.follow_hard_constraints(\n",
    "        traj_raw, new_traj_wp, constraints)\n",
    "\n",
    "    new_traj_wp_scaled = mr.addapt_to_hard_constraints(\n",
    "        traj_raw, new_traj_wp, constraints)\n",
    "\n",
    "    return new_traj_wp, pred[0, :, :], obj_poses_new.T\n",
    "\n",
    "data_2d = []\n",
    "for d_ in data[:10000]:\n",
    "    if len(d_[\"obj_names\"]) == 3:\n",
    "        d=d_.copy()\n",
    "    \n",
    "        traj, obj_poses, text, obj_names = d[\"input_traj\"], d[\"obj_poses\"],d[\"text\"],d[\"obj_names\"]\n",
    "        traj = np.array(traj)\n",
    "        obj_poses = np.array(obj_poses)\n",
    "        try:\n",
    "            new_traj_wp, pred, obj_poses_new = apply_interaction(mr2d, traj[:,:2], obj_poses[:3,:2].T, text, obj_names, n_obj=3)\n",
    "            traj_new = traj.copy()\n",
    "            traj_new[:,:2] = new_traj_wp[:,:2]\n",
    "            d[\"output_traj\"] = traj_new\n",
    "            data_2d.append(d)\n",
    "        except:\n",
    "            pass\n",
    "    # print(d[\"output_traj\"])\n",
    "print(len(data_2d))\n",
    "mr.save_data(data_2d,data_name=\"pred2D_100k_latte_f\", base_path=data_folder)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## opposit of Groud Truth"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "data_opposit = []\n",
    "for d_ in data:\n",
    "    d = d_.copy()\n",
    "    traj_in = np.array(d[\"input_traj\"])\n",
    "    traj_out = np.array(d[\"output_traj\"])\n",
    "    delta = traj_out-traj_in\n",
    "    new_traj = traj_in - delta\n",
    "    d[\"output_traj\"] = new_traj.tolist()\n",
    "    data_opposit.append(d)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Load model for real time interactions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'num_layers_enc': 1, 'num_layers_dec': 5, 'd_model': 400, 'dff': 512, 'num_heads': 8, 'dropout_rate': 0.1, 'wp_d': 4, 'num_emb_vec': 4, 'bs': 16, 'dense_n': 512, 'num_dense': 3, 'concat_emb': True, 'features_n': 793, 'optimizer': 'adam', 'norm_layer': True, 'activation': 'tanh'}\n",
      "loading weights:  /home/arthur/Desktop/projects/LaTTe-Language-Trajectory-TransformEr/models/TF-num_layers_enc:1-num_layers_dec:5-d_model:400-dff:512-num_heads:8-dropout_rate:0.1-wp_d:4-num_emb_vec:4-bs:16-dense_n:512-num_dense:3-concat_emb:True-features_n:793-optimizer:adam-norm_layer:True-activation:tanh.h5\n",
      "DONE\n"
     ]
    }
   ],
   "source": [
    "from src.TF4D_mult_features import *\n",
    "# model_path = models_folder+\"FINAL_dataset_size_aug_fixsteps/\"\n",
    "model_path = \"/home/arthur/Desktop/projects/LaTTe-Language-Trajectory-TransformEr/models/\"\n",
    "# model_name = \"TF-num_layers_enc:1-num_layers_dec:5-d_model:400-dff:512-num_heads:8-dropout_rate:0.1-wp_d:4-num_emb_vec:4-bs:16-dense_n:512-num_dense:3-concat_emb:False-features_n:793-optimizer:adam-norm_layer:True-activation:tanh-loss:mse-sf:0.5-augment:1.h5\"\n",
    "model_name = \"TF-num_layers_enc:1-num_layers_dec:5-d_model:400-dff:512-num_heads:8-dropout_rate:0.1-wp_d:4-num_emb_vec:4-bs:16-dense_n:512-num_dense:3-concat_emb:True-features_n:793-optimizer:adam-norm_layer:True-activation:tanh.h5\"\n",
    "model_file = model_path+model_name\n",
    "\n",
    "model = load_model(model_file, delimiter=\"-\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Experiment"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The autoreload extension is already loaded. To reload it, use:\n",
      "  %reload_ext autoreload\n"
     ]
    }
   ],
   "source": [
    "%matplotlib qt\n",
    "\n",
    "%load_ext autoreload\n",
    "%autoreload 2\n",
    "\n",
    "from matplotlib import pyplot as plt\n",
    "import numpy as np\n",
    "from mpl_toolkits.mplot3d import Axes3D\n",
    "from matplotlib import animation\n",
    "from matplotlib.widgets import Button,Slider, TextBox, RadioButtons\n",
    "\n",
    "import re\n",
    "from src.functions import *\n",
    "from matplotlib import rc\n",
    "import textwrap\n",
    "import random\n",
    "import json\n",
    "from src.config import *\n",
    "import datetime\n",
    "import sys\n",
    "\n",
    "# rc('text', usetex=True)\n",
    "\n",
    "class User_study_interface():\n",
    "\n",
    "    def __init__(self, data_ditributions, samples_per_data=1, interaction_samples=1, dis_names=None, model=None, dev_mode=False):\n",
    "        \"\"\"data_ditributions: list, with the different data distributions\n",
    "        samples_per_data: int number of samples per distribution to present the user\n",
    "        dis_names: list of with the distributuions names\"\"\"\n",
    "\n",
    "        self.dev_mode = dev_mode\n",
    "        self.model = model\n",
    "        self.dis_names = [str(i) for i in range(len(data_ditributions))] if dis_names is None else dis_names\n",
    "        self.num_samples = samples_per_data*len(data_ditributions) # per ditribution\n",
    "        exp_data = []\n",
    "        exp_data_indices = []\n",
    "        exp_sample_indices = []\n",
    "\n",
    "        self.interaction_text = {}\n",
    "        self.samples_inices = {}\n",
    "        self.user_answers = {}\n",
    "        self.interaction_samples = interaction_samples\n",
    "        for i,data_dis in enumerate(data_ditributions):\n",
    "\n",
    "            samples_inices = random.choices(range(len(data_dis)), k=samples_per_data)\n",
    "            # print(self.dis_names[i],samples_inices)\n",
    "            exp_data = exp_data + np.array(data_dis)[samples_inices].tolist()\n",
    "            \n",
    "            # exp_data = exp_data + random.choices(data_dis, k=samples_per_data)\n",
    "            self.samples_inices[self.dis_names[i]] = samples_inices\n",
    "            \n",
    "            exp_data_indices = exp_data_indices + [i]*samples_per_data\n",
    "            exp_sample_indices = exp_sample_indices + samples_inices\n",
    "\n",
    "\n",
    "        index_shuf = list(range(len(exp_data)))\n",
    "        random.shuffle(index_shuf)\n",
    "        # print(index_shuf)\n",
    "        self.exp_data = [exp_data[i].copy() for i in index_shuf]\n",
    "        self.exp_data_indices = [exp_data_indices[i] for i in index_shuf]\n",
    "        self.exp_sample_indices = [exp_sample_indices[i] for i in index_shuf]\n",
    "\n",
    "\n",
    "        data_dis = data_ditributions[0]\n",
    "        samples_inices = random.choices(range(len(data_dis)), k=interaction_samples)\n",
    "        self.exp_data  = self.exp_data + np.array(data_dis)[samples_inices].tolist()\n",
    "        self.samples_inices[\"interaction\"] = samples_inices\n",
    "        self.exp_data_indices = self.exp_data_indices + [i]*interaction_samples\n",
    "        self.exp_sample_indices = self.exp_sample_indices + samples_inices\n",
    "\n",
    "        self.w = textwrap.TextWrapper(width=60,break_long_words=False,replace_whitespace=False)\n",
    "        \n",
    "        self.init_setup()\n",
    "\n",
    "    def init_setup(self):\n",
    "        self.fig = plt.figure(figsize=(8,13))\n",
    "        self.fig.add_subplot(1,1,1,projection='3d')\n",
    "        self.ax = plt.gca(projection='3d')\n",
    "        # self.ax.set_xlabel('X axis')\n",
    "        # self.ax.set_ylabel('Y axis')\n",
    "        # self.ax.set_zlabel('Z axis')\n",
    "        if not self.dev_mode:\n",
    "            self.ax.axes.xaxis.set_ticklabels([])\n",
    "            self.ax.axes.yaxis.set_ticklabels([])\n",
    "            self.ax.axes.zaxis.set_ticklabels([])\n",
    "\n",
    "        self.ani=None\n",
    "        self.lines = []\n",
    "        self.tip_marker = []\n",
    "        self.objs_scatter = []\n",
    "        self.objs_text = []\n",
    "        self.colors = []\n",
    "        self.enable_save = False\n",
    "        self.user_data = {\"name\":\"\", \"age\":\"\"}\n",
    "        self.eval_text_list = ['yes, much better', 'yes, a bit better' , 'same', 'No, a bit wrong', 'No, totally wrong']\n",
    "\n",
    "        self.sample_i =1\n",
    "\n",
    "        self.alert_text = None\n",
    "        self.count_text = self.ax.text2D(0.05, 0.95, str(self.sample_i)+\"/\"+str(self.num_samples+self.interaction_samples), transform=self.ax.transAxes)\n",
    "\n",
    "\n",
    "        self.setup_bts()\n",
    "        if self.sample_i > self.num_samples:\n",
    "            if self.sample_i <= self.num_samples+self.interaction_samples:\n",
    "                self.reset()\n",
    "                self.plot_intercative()\n",
    "            else:\n",
    "                pass\n",
    "        else:\n",
    "            self.plot_sample(self.exp_data[self.sample_i-1])\n",
    "\n",
    "            self.reset_bts()\n",
    "\n",
    "        self.plot_markers()\n",
    "\n",
    "        self.fig.canvas.mpl_connect('key_press_event', self.on_key_press_event)\n",
    "        plt.show()\n",
    "\n",
    "    def update(self, num, trajs, lines, tips):\n",
    "\n",
    "        for i, line in enumerate(lines):\n",
    "            line.set_data(trajs[i][:2,:num])\n",
    "            line.set_3d_properties(trajs[i][2,:num])\n",
    "        for i, tip in enumerate(tips):\n",
    "            n = max(num-1, 0)\n",
    "            tip.set_data(trajs[i][:2,n:n+1])\n",
    "            tip.set_3d_properties(trajs[i][2,n:n+1])\n",
    "    \n",
    "\n",
    "    def on_key_press_event(self, event):\n",
    "        sys.stdout.flush()\n",
    "        if event.key == 'enter':\n",
    "            if self.sample_i > self.num_samples:\n",
    "                if self.sample_i <= self.num_samples+self.interaction_samples:\n",
    "                    self.predict(event)\n",
    "\n",
    "    def next_cb(self, event):\n",
    "        if self.sample_i in self.user_answers.keys():\n",
    "            self.sample_i += 1\n",
    "            if self.sample_i > self.num_samples:\n",
    "                if self.sample_i <= self.num_samples+self.interaction_samples:\n",
    "                    self.reset()\n",
    "                    self.plot_intercative()\n",
    "                else:\n",
    "                    self.reset()\n",
    "                    self.plot_final_screen()\n",
    "            \n",
    "            else:\n",
    "                self.plot_sample(self.exp_data[self.sample_i-1])\n",
    "                self.reset_bts()\n",
    "\n",
    "        else:\n",
    "            if not self.alert_text is None:\n",
    "                self.alert_text.remove()\n",
    "            self.alert_text = self.ax.text2D(0.50, -0.1,\"Please answer the question first!\", transform=self.ax.transAxes, color=\"red\")\n",
    "        \n",
    "\n",
    "    def prev_cb(self, event):\n",
    "        if self.sample_i > 1:\n",
    "            self.sample_i -= 1\n",
    "            if self.sample_i > self.num_samples:\n",
    "                if self.sample_i <= self.num_samples+self.interaction_samples:\n",
    "                    self.reset()\n",
    "                    self.plot_intercative()\n",
    "                else:\n",
    "                    pass\n",
    "            else:\n",
    "                self.plot_sample(self.exp_data[self.sample_i-1])\n",
    "\n",
    "                self.reset_bts()\n",
    "\n",
    "\n",
    "    def eval_cb(self, labels):\n",
    "        self.user_answers[self.sample_i] = self.eval_text_list.index(labels)\n",
    "\n",
    "        # self.radio.remove()\n",
    "    \n",
    "\n",
    "    def plot_final_screen(self):\n",
    "        self.final_bg = plt.axes([0.05, 0.05, 0.9, 0.9], facecolor=\"white\")\n",
    "\n",
    "\n",
    "        self.final_bg.set_xticks([])\n",
    "        self.final_bg.set_yticks([])\n",
    "        final_text = self.final_bg.text(0.05, 0.7,\"Thank you for helping us :)\", transform=self.final_bg.transAxes, color=\"Black\", fontweight=\"bold\", fontsize=27)\n",
    "\n",
    "        self.save_text = self.final_bg.text(0.2, 0.63,\" \", transform=self.final_bg.transAxes, color=\"white\")\n",
    "\n",
    "        self.ax_name_box = plt.axes([0.2, 0.5, 0.4, 0.04])\n",
    "        self.name_box = TextBox(self.ax_name_box, 'your name:', initial=self.user_data[\"name\"])\n",
    "        self.name_box.on_submit(lambda val: self.log_entry(val,\"name\"))\n",
    "\n",
    "        self.ax_age_box = plt.axes([0.2, 0.4, 0.4, 0.04])\n",
    "        self.age_box = TextBox(self.ax_age_box, 'your age:', initial=self.user_data[\"age\"])\n",
    "        self.age_box.on_submit(lambda val: self.log_entry(val,\"age\"))\n",
    "\n",
    "        self.axsave = plt.axes([0.2, 0.2, 0.20, 0.075])\n",
    "        self.btsave = Button(self.axsave, 'SAVE and EXIT')\n",
    "        self.btsave.on_clicked(self.save_data)\n",
    "    \n",
    "    def plot_intercative(self):\n",
    "        # self.interactive_bg = plt.axes([0.05, 0.7, 0.9, 0.3], facecolor=\"white\")\n",
    "        # self.interactive_bg.set_xticks([])\n",
    "        # self.interactive_bg.set_yticks([])\n",
    "        \n",
    "        # final_text = self.final_bg.text(0.05, 0.7,\"Thank you for helping us :)\", transform=self.final_bg.transAxes, color=\"Black\", fontweight=\"bold\", fontsize=27)\n",
    "\n",
    "        # self.save_text = self.final_bg.text(0.2, 0.63,\" \", transform=self.final_bg.transAxes, color=\"white\")\n",
    "\n",
    "        self.nl_box_ax = plt.axes([0.2, 0.85, 0.4, 0.04])\n",
    "        init_text = \"\" if not self.sample_i in self.interaction_text.keys() else self.interaction_text[self.sample_i]\n",
    "        self.nl_box = TextBox(self.nl_box_ax, '', initial=init_text )\n",
    "        self.nl_box.on_submit(self.register_interaction)\n",
    "\n",
    "        self.axmodify = plt.axes([0.6, 0.85, 0.20, 0.040])\n",
    "        self.btmodify = Button(self.axmodify, 'modify trajectory')\n",
    "        self.btmodify.on_clicked(self.predict)\n",
    "\n",
    "\n",
    "        if self.dev_mode:\n",
    "            self.lf_ax = plt.axes([0.2, 0.80, 0.4, 0.04])\n",
    "            self.lf_slider = Slider(\n",
    "                ax=self.lf_ax,\n",
    "                label='Locality factor',\n",
    "                valmin=0.0,\n",
    "                valmax=1.0,\n",
    "                valinit=self.exp_data[self.sample_i-1][\"locality_factor\"],\n",
    "            )\n",
    "            self.lf_slider.on_changed(self.update_lf)\n",
    "\n",
    "\n",
    "            # register the update function with each slider\n",
    "\n",
    "\n",
    "        plot_out = self.sample_i in self.interaction_text.keys()\n",
    "        self.plot_sample(self.exp_data[self.sample_i-1],plot_out=plot_out)\n",
    "        self.ax.set_title(\" Type your instruction:\\n\\n\\n\\n\\n\\n\", fontsize=18,  fontname=\"Times New Roman\")\n",
    "        self.reset_bts()\n",
    "\n",
    "    def update_lf(self, val):\n",
    "        self.exp_data[self.sample_i-1][\"locality_factor\"] = val\n",
    "\n",
    "    def register_interaction(self, text):\n",
    "        self.interaction_text[self.sample_i] = text\n",
    "\n",
    "    def predict(self, event):\n",
    "        if self.sample_i in self.interaction_text.keys():\n",
    "            d = self.exp_data[self.sample_i-1]\n",
    "            pred, traj = mr.apply_interaction(self.model, d, self.interaction_text[self.sample_i],  label=False, images=None)\n",
    "            self.pred=pred[0]\n",
    "            self.exp_data[self.sample_i-1][\"output_traj\"] = pred[0].tolist()\n",
    "            # x_t = (mr.prepare_x(X_test), list_to_wp_seq(y_test,d=4), X_test[:,embedding_indices])\n",
    "            # self.pred = generate(model ,x_t, traj_n=traj_n).numpy()\n",
    "            print(self.pred.shape)\n",
    "            self.plot_sample(self.exp_data[self.sample_i-1],new_pred=True)\n",
    "            self.ax.set_title(\" Type your instruction:\\n\\n\\n\\n\\n\", fontsize=18,  fontname=\"Times New Roman\")\n",
    "            self.reset_bts()\n",
    "        else:\n",
    "            print(\"write first\")\n",
    "\n",
    "    def log_entry(self, val, k):\n",
    "        self.user_data[k] = str(val)\n",
    "\n",
    "        self.enable_save = True\n",
    "        for key in self.user_data.keys():\n",
    "            if self.user_data[key] == \"\":\n",
    "                self.enable_save = False\n",
    "\n",
    "    def save_data(self, name):\n",
    "        if self.enable_save:\n",
    "            if not os.path.exists(user_study_folder):\n",
    "                print(\"User study folder not found:\", user_study_folder)\n",
    "\n",
    "            summary = {d_name:{a:0 for a in self.eval_text_list} for d_name in self.dis_names}\n",
    "            answers_per_distribution = {d_name:{a:[] for a in self.eval_text_list} for d_name in self.dis_names}\n",
    "\n",
    "            for i,(k,v) in enumerate(self.user_answers.items()):\n",
    "                print(\"V\",v, \"  K: \",k)\n",
    "                dis = self.dis_names[self.exp_data_indices[int(k-1)]]\n",
    "                answer = self.eval_text_list[v]\n",
    "                summary[dis][answer]= summary[dis][answer] + 1\n",
    "                print(self.exp_sample_indices[i],\"\\t\")\n",
    "                \n",
    "                answers_per_distribution[dis][answer] = answers_per_distribution[dis][answer] + [self.exp_sample_indices[i]]\n",
    "\n",
    "            data_to_save = {\"summary\":summary,\n",
    "                            \"answers_per_distribution\":answers_per_distribution,\n",
    "                            \"num_samples\":self.num_samples,\n",
    "                            \"dis_names\":self.dis_names,\n",
    "                            \"exp_data\":self.exp_data,\n",
    "                            \"exp_data_indices\":self.exp_data_indices,\n",
    "                            \"exp_sample_indices\":self.exp_sample_indices,\n",
    "                            \"user_answers\":self.user_answers,\n",
    "                            \"interaction_text\":self.interaction_text\n",
    "                            }\n",
    "            # data_to_save\n",
    "            print(self.user_answers)\n",
    "            with open( os.path.join(user_study_folder, self.user_data[\"name\"]+\"_\"+self.user_data[\"age\"] +\"_\"+ datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\") +'.json'), 'w') as f:\n",
    "                json.dump(data_to_save, f)\n",
    "            plt.close()\n",
    "        else: \n",
    "            if not self.save_text is None:\n",
    "                self.save_text.remove()\n",
    "            self.save_text = self.final_bg.text(0.2, 0.63,\"Please complete all the fields first\", transform=self.final_bg.transAxes, color=\"red\")\n",
    "    \n",
    "    def replay_from_file(self,user_name):\n",
    "        \"\"\"Restore a previously saved user-study session from a JSON log file.\n",
    "\n",
    "        Parameters\n",
    "        ----------\n",
    "        user_name : str\n",
    "            File name of the saved session inside `user_study_folder`.\n",
    "        \"\"\"\n",
    "        print(user_name)\n",
    "        u = {}\n",
    "        with open(os.path.join(user_study_folder,user_name), 'r', encoding='utf-8') as f:\n",
    "            u = json.load(f)\n",
    "\n",
    "        # Restore the experiment configuration exactly as it was saved.\n",
    "        self.num_samples = u[\"num_samples\"]\n",
    "        self.dis_names = u[\"dis_names\"]\n",
    "        self.exp_data = u[\"exp_data\"]\n",
    "        self.exp_data_indices = u[\"exp_data_indices\"]\n",
    "        self.exp_sample_indices = u[\"exp_sample_indices\"]\n",
    "        # self.interaction_text = \"interaction_text\"\n",
    "        # Restart the replay from the first sample.\n",
    "        self.sample_i =1\n",
    "\n",
    "        # JSON serializes dict keys as strings; convert them back to ints so\n",
    "        # lookups by the integer sample index work again.\n",
    "        for k,v in u[\"interaction_text\"].items():\n",
    "            self.interaction_text[int(k)]=v\n",
    "        for k,v in u[\"user_answers\"].items():\n",
    "            self.user_answers[int(k)]=int(v)\n",
    "        self.init_setup()\n",
    "        # self.user_answers = u[\"user_answers\"]\n",
    "\n",
    "        # for k,v in u.items():\n",
    "        #     print(k,v)\n",
    "        print(self.user_answers)\n",
    "        print(self.sample_i)\n",
    "\n",
    "        # Debug output: show the stored answer for the current sample, if any.\n",
    "        if self.sample_i in self.user_answers.keys() or str(self.sample_i) in self.user_answers.keys():\n",
    "            print(\"A\")\n",
    "            print(self.user_answers[self.sample_i])\n",
    "            print(type(self.user_answers[self.sample_i]))\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "    def setup_bts(self):\n",
    "        \"\"\"Build the per-sample widgets: alert text, the evaluation radio\n",
    "        buttons and the Next/Previous navigation buttons.\n",
    "\n",
    "        Reads self.sample_i, self.num_samples, self.interaction_samples,\n",
    "        self.eval_text_list and self.user_answers; stores every created\n",
    "        widget on self so reset_bts() can remove and rebuild them.\n",
    "        \"\"\"\n",
    "        self.alert_text = self.ax.text2D(0.70, 0.05, \"alert\", transform=self.ax.transAxes, color=\"white\")\n",
    "        axcolor = 'lightgoldenrodyellow'\n",
    "\n",
    "        self.rax = plt.axes([0.15, 0.05, 0.2, 0.15], facecolor=axcolor)\n",
    "        # Fixed user-facing typos (was: \"follows the intruction\").\n",
    "        self.rax.set_title(\"Does the modified trajectory \\n follow the instruction?\")\n",
    "\n",
    "        # The very last sample shows 'FINISH!' instead of 'Next'.\n",
    "        self.axnext = plt.axes([0.66, 0.05, 0.15, 0.075])\n",
    "        if self.sample_i < self.num_samples+self.interaction_samples:\n",
    "            self.bnext = Button(self.axnext, 'Next')\n",
    "        else:\n",
    "            # `else` (original used `elif ==`) so self.bnext is always assigned.\n",
    "            self.bnext = Button(self.axnext, 'FINISH!')\n",
    "        self.bnext.on_clicked(self.next_cb)\n",
    "\n",
    "        # 'Previous' is drawn white (effectively hidden) on the first sample,\n",
    "        # but the button is always created so prev_cb can be attached.\n",
    "        self.axprev = plt.axes([0.5, 0.05, 0.15, 0.075])\n",
    "        if self.sample_i > 1:\n",
    "            self.bprev = Button(self.axprev, 'Previous')\n",
    "        else:\n",
    "            self.bprev = Button(self.axprev, 'Previous',color=\"white\")\n",
    "        self.bprev.on_clicked(self.prev_cb)\n",
    "\n",
    "        # RadioButtons' `active` is the *index* of the initially selected entry;\n",
    "        # the original boolean list ([True, False, ...]) matched no index and\n",
    "        # therefore selected nothing. 0 selects the first option as intended.\n",
    "        self.radio = RadioButtons(self.rax, self.eval_text_list, active=0, activecolor='r')\n",
    "        self.radio.on_clicked(self.eval_cb)\n",
    "\n",
    "        # Restore a previously stored answer; JSON round-trips can leave the\n",
    "        # key as either int or str, so accept both (the original indexed with\n",
    "        # the int key only and could raise KeyError).\n",
    "        key = self.sample_i if self.sample_i in self.user_answers else str(self.sample_i)\n",
    "        if key in self.user_answers:\n",
    "            self.radio.set_active(int(self.user_answers[key]))\n",
    "\n",
    "\n",
    "        \n",
    "    def reset_bts(self):\n",
    "        \"\"\"Remove the navigation/radio widget axes and rebuild them from scratch.\"\"\"\n",
    "        for widget_ax in (self.axprev, self.axnext, self.rax):\n",
    "            widget_ax.remove()\n",
    "        self.setup_bts()\n",
    "        \n",
    "    def plot_sample(self, d, new_pred=False, plot_out=True, no_title=False):\n",
    "        \"\"\"Render one sample: objects, original vs. modified trajectory, title.\n",
    "\n",
    "        Parameters\n",
    "        ----------\n",
    "        d : dict\n",
    "            Sample with keys \"input_traj\", \"output_traj\", \"text\",\n",
    "            \"obj_names\", \"obj_poses\", \"image_paths\".\n",
    "        new_pred : bool\n",
    "            If True, plot the prediction stored on self.pred instead of the\n",
    "            sample's \"output_traj\".\n",
    "        plot_out : bool\n",
    "            If False, only the original trajectory is drawn and animated.\n",
    "        no_title : bool\n",
    "            If True, suppress the instruction title.\n",
    "        \"\"\"\n",
    "        \n",
    "        self.reset()\n",
    "        \n",
    "        pts = np.asarray(d[\"input_traj\"])\n",
    "        if new_pred:\n",
    "            pts_new = self.pred\n",
    "        else:\n",
    "            pts_new = np.asarray(d[\"output_traj\"])\n",
    "        \n",
    "        text = d[\"text\"]\n",
    "        obj_names = np.asarray(d[\"obj_names\"])\n",
    "        obj_pt = np.asarray(d[\"obj_poses\"])\n",
    "        # change_type = d[\"change_type\"]\n",
    "        image_paths = d[\"image_paths\"]\n",
    "\n",
    "        # Re-pack object poses into the dict shape plot_objs() expects.\n",
    "        objs  = {}\n",
    "        for x,y,z,name in zip(obj_pt[:,0],obj_pt[:,1],obj_pt[:,2],obj_names):\n",
    "            objs[name] = {\"value\":{\"obj_p\":[x,y,z]}}\n",
    "\n",
    "        new_pts_list = [pts_new]\n",
    "        if d[\"output_traj\"] is None:\n",
    "            new_pts_list = []\n",
    "\n",
    "        # Resample both trajectories via incorporate_speed (helper defined\n",
    "        # elsewhere; presumably folds the speed channel into point timing --\n",
    "        # TODO confirm against its definition).\n",
    "        N = 100\n",
    "        dt = 0.02\n",
    "        traj_original = incorporate_speed(pts,dt=dt, N=N)\n",
    "        traj_new = incorporate_speed(pts_new,dt=dt, N=N)\n",
    "        # fig = plot_samples(text,pts,new_pts_list, objs=objs,fig=figure, show= False, plot_speed=False, labels=[\"modified\"])\n",
    "        # if not plot_out:\n",
    "        #     traj_new = traj_original.copy()\n",
    "        self.plot_objs(objs)\n",
    "        # Transposed copies are stored for the animation's frame callback.\n",
    "        if plot_out:\n",
    "            self.trajs = [traj_original.T, traj_new.T]\n",
    "        else:\n",
    "            self.trajs = [traj_original.T] \n",
    "\n",
    "        self.plot_trajs(traj_original, traj_new,plot_out=plot_out)\n",
    "\n",
    "        # Title shows the line-wrapped natural-language instruction.\n",
    "        if not no_title:\n",
    "            self.ax.set_title(\"INSTRUCTION:\\n\\n\"+  '\\n'.join(self.w.wrap(text))  , fontsize=18,  fontname=\"Times New Roman\")\n",
    "        else:\n",
    "\n",
    "            self.ax.set_title(\" \", fontsize=18,  fontname=\"Times New Roman\")\n",
    "        set_axes_equal(self.ax)\n",
    "\n",
    "        self.animate(N=100,plot_out=plot_out)\n",
    "\n",
    "    def plot_objs(self, objs):\n",
    "        \"\"\"Scatter every object and label it with its name.\n",
    "\n",
    "        Parameters\n",
    "        ----------\n",
    "        objs : dict\n",
    "            Maps object name -> {\"value\": {\"obj_p\": [x, y, z]}}.\n",
    "        \"\"\"\n",
    "        for i,(name,v) in enumerate(objs.items()):\n",
    "            x,y,z = v[\"value\"][\"obj_p\"]\n",
    "\n",
    "            # Off-by-one fix: the palette's last entry was never used because\n",
    "            # the original condition was `i < len(self.colors)-1`. Once the\n",
    "            # palette runs out, fall back to a random hex colour (digits plus\n",
    "            # A/B only, presumably to keep colours dark enough to read).\n",
    "            if i < len(self.colors):\n",
    "                color = self.colors[i]\n",
    "            else:\n",
    "                color = \"#\"+''.join(random.choice('0123456789AB') for _ in range(6))\n",
    "\n",
    "            sc = self.ax.scatter(x,y,z, color=color, s=50)\n",
    "            self.objs_scatter.append(sc)\n",
    "            t = self.ax.text(x, y, z, name, 'x', color=color, ha='center', fontweight=\"bold\")\n",
    "            self.objs_text.append(t)\n",
    "\n",
    "    def plot_trajs(self, pts, pts_new, plot_out=True):\n",
    "        \"\"\"Draw the original (red) and modified (blue) trajectories.\n",
    "\n",
    "        Artists are appended to self.lines in a fixed order that animate()\n",
    "        depends on: the solid animated lines come first (self.lines[:n_lines]),\n",
    "        then the faint full-path lines; tip markers go to self.tip_marker.\n",
    "        \"\"\"\n",
    "\n",
    "        alpha = 0.1\n",
    "        color_original = \"red\"\n",
    "        color_modified = \"blue\"\n",
    "\n",
    "        x_init, y_init, z_init = pts[:,0],pts[:,1],pts[:,2]\n",
    "        x_new, y_new, z_new= pts_new[:,0],pts_new[:,1],pts_new[:,2]\n",
    "        \n",
    "\n",
    "        # Solid lines: these are the ones updated frame-by-frame during the animation.\n",
    "        line3, = self.ax.plot(x_init, y_init, z_init,alpha=0.9,color=color_original, label=\"Original\")\n",
    "        self.lines.append(line3)\n",
    "        if plot_out:\n",
    "            line4, = self.ax.plot(x_new, y_new, z_new,alpha=0.9, color=color_modified, label=\"Modified\")\n",
    "            self.lines.append(line4)\n",
    "        # Handles reversed so \"Modified\" is listed above \"Original\" in the legend.\n",
    "        handles, labels = self.ax.get_legend_handles_labels()\n",
    "        self.ax.legend(handles[::-1], labels[::-1])\n",
    "\n",
    "        # Faint static lines that always show the complete paths.\n",
    "        line1, = self.ax.plot(x_init, y_init, z_init,alpha=alpha,color=color_original, label=\"Original\")\n",
    "        self.lines.append(line1)\n",
    "        if plot_out:\n",
    "            line2, = self.ax.plot(x_new, y_new, z_new,alpha=alpha, color=color_modified, label=\"Modified\")\n",
    "            self.lines.append(line2)\n",
    "\n",
    "        # Single-point markers that the animation moves along each trajectory.\n",
    "        tip_marker1, = self.ax.plot(x_init[0:1], y_init[0:1], z_init[0:1], lw=2, c=color_original, marker='o')\n",
    "        self.tip_marker.append(tip_marker1)\n",
    "        if plot_out:\n",
    "            tip_marker2, = self.ax.plot(x_new[0:1], y_new[0:1], z_new[0:1], lw=2, c=color_modified, marker='o')\n",
    "            self.tip_marker.append(tip_marker2)\n",
    "\n",
    "    def plot_markers(self):\n",
    "        \"\"\"Draw faint grey orientation labels (front/left/bottom) in the 3D axes.\"\"\"\n",
    "        font_size = 30\n",
    "        offset = 0.5\n",
    "        # (position, label, text direction) for each orientation hint.\n",
    "        label_specs = [\n",
    "            ((0, offset, 0), \"front\", 'x'),\n",
    "            ((-offset, 0, 0), \"left\", 'y'),\n",
    "            ((0, 0, -offset), \"bottom\", 'x'),\n",
    "        ]\n",
    "        for (x, y, z), label, zdir in label_specs:\n",
    "            self.ax.text(x, y, z, label, zdir, color='grey', alpha=0.2,\n",
    "                         fontsize=font_size, ha='center', va='center')\n",
    "\n",
    "\n",
    "    def reset(self):\n",
    "        \"\"\"Remove all per-sample artists and redraw the sample counter.\"\"\"\n",
    "        self.count_text.remove()\n",
    "        self.count_text = self.ax.text2D(0.05, 0.95, str(self.sample_i)+\"/\"+str(self.num_samples+self.interaction_samples), transform=self.ax.transAxes)\n",
    "        \n",
    "        # Best-effort removals: these artists may not exist yet, or may already\n",
    "        # have been removed, depending on which screen is active. The original\n",
    "        # bare `except:` is narrowed to Exception so Ctrl-C is not swallowed.\n",
    "        try:\n",
    "            if self.alert_text is not None:\n",
    "                self.alert_text.remove()\n",
    "        except Exception:\n",
    "            pass\n",
    "\n",
    "        try:\n",
    "            self.interactive_bg.remove()\n",
    "        except Exception:\n",
    "            pass\n",
    "\n",
    "        # Lines, tip markers and object scatter/labels are appended in parallel\n",
    "        # by plot_trajs()/plot_objs(); remove every artist before re-plotting.\n",
    "        for line in self.lines:\n",
    "            line.remove()\n",
    "        for marker in self.tip_marker:\n",
    "            marker.remove()\n",
    "        for scatter, label in zip(self.objs_scatter, self.objs_text):\n",
    "            scatter.remove()\n",
    "            label.remove()\n",
    "\n",
    "        self.objs_scatter = []\n",
    "        self.objs_text = []\n",
    "        self.lines = []\n",
    "        self.tip_marker = []\n",
    "\n",
    "\n",
    "\n",
    "    def animate(self, N=100,plot_out=True):\n",
    "        \"\"\"Start the trajectory animation.\n",
    "\n",
    "        N : int\n",
    "            Number of frames; interval=1000/N ms makes a full cycle take ~1 s.\n",
    "        plot_out : bool\n",
    "            If False, only the original trajectory (one line) is animated.\n",
    "        \"\"\"\n",
    "        # Animate only the solid lines / tip markers appended first by plot_trajs.\n",
    "        n_lines = 2 if plot_out else 1\n",
    "        # self.update is the per-frame callback (defined elsewhere in the class).\n",
    "        self.ani = animation.FuncAnimation(self.fig, self.update, N, fargs=(self.trajs,self.lines[:n_lines], self.tip_marker[:n_lines]), interval=1000/N,cache_frame_data=False, blit=False)\n",
    "\n",
    "    \n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### User study data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "libGL error: MESA-LOADER: failed to open iris: /usr/lib/dri/iris_dri.so: cannot open shared object file: No such file or directory (search paths /usr/lib/x86_64-linux-gnu/dri:\\$${ORIGIN}/dri:/usr/lib/dri, suffix _dri)\n",
      "libGL error: failed to load driver: iris\n",
      "libGL error: MESA-LOADER: failed to open iris: /usr/lib/dri/iris_dri.so: cannot open shared object file: No such file or directory (search paths /usr/lib/x86_64-linux-gnu/dri:\\$${ORIGIN}/dri:/usr/lib/dri, suffix _dri)\n",
      "libGL error: failed to load driver: iris\n",
      "libGL error: MESA-LOADER: failed to open swrast: /usr/lib/dri/swrast_dri.so: cannot open shared object file: No such file or directory (search paths /usr/lib/x86_64-linux-gnu/dri:\\$${ORIGIN}/dri:/usr/lib/dri, suffix _dri)\n",
      "libGL error: failed to load driver: swrast\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "findfont: Font family ['Times New Roman'] not found. Falling back to DejaVu Sans.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "write first\n",
      "DONE - computing textual embeddings (1, 768)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "1it [00:00, 3175.10it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DONE - computing similarity vectors \n",
      "DONE - concatenating \n",
      "X shape (1, 953)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 0/39 [00:00<?, ?it/s]2023-11-02 16:55:35.607392: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:116] None of the MLIR optimization passes are enabled (registered 2)\n",
      "2023-11-02 16:55:35.607976: I tensorflow/core/platform/profile_utils/cpu_utils.cc:112] CPU Frequency: 2995200000 Hz\n",
      "100%|██████████| 39/39 [00:04<00:00,  9.27it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(40, 4)\n",
      "DONE - computing textual embeddings (1, 768)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "1it [00:00, 4013.69it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DONE - computing similarity vectors \n",
      "DONE - concatenating \n",
      "X shape (1, 953)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 39/39 [00:01<00:00, 33.04it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(40, 4)\n",
      "DONE - computing textual embeddings (1, 768)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "1it [00:00, 3851.52it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DONE - computing similarity vectors \n",
      "DONE - concatenating \n",
      "X shape (1, 953)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 39/39 [00:01<00:00, 31.42it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(40, 4)\n",
      "DONE - computing textual embeddings (1, 768)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "1it [00:00, 2995.93it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DONE - computing similarity vectors \n",
      "DONE - concatenating \n",
      "X shape (1, 953)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 39/39 [00:01<00:00, 31.35it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(40, 4)\n",
      "DONE - computing textual embeddings (1, 768)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "1it [00:00, 2082.57it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DONE - computing similarity vectors \n",
      "DONE - concatenating \n",
      "X shape (1, 953)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 39/39 [00:01<00:00, 32.59it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(40, 4)\n",
      "DONE - computing textual embeddings (1, 768)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "1it [00:00, 3833.92it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DONE - computing similarity vectors \n",
      "DONE - concatenating \n",
      "X shape (1, 953)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 39/39 [00:01<00:00, 31.66it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(40, 4)\n",
      "DONE - computing textual embeddings (1, 768)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "1it [00:00, 3960.63it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DONE - computing similarity vectors \n",
      "DONE - concatenating \n",
      "X shape (1, 953)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 39/39 [00:01<00:00, 33.92it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(40, 4)\n",
      "DONE - computing textual embeddings (1, 768)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "1it [00:00, 4161.02it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DONE - computing similarity vectors \n",
      "DONE - concatenating \n",
      "X shape (1, 953)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 39/39 [00:01<00:00, 33.60it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(40, 4)\n",
      "DONE - computing textual embeddings (1, 768)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "1it [00:00, 1944.51it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DONE - computing similarity vectors \n",
      "DONE - concatenating \n",
      "X shape (1, 953)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 39/39 [00:01<00:00, 32.41it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(40, 4)\n",
      "DONE - computing textual embeddings (1, 768)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "1it [00:00, 4288.65it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DONE - computing similarity vectors \n",
      "DONE - concatenating \n",
      "X shape (1, 953)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 39/39 [00:01<00:00, 34.38it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(40, 4)\n",
      "DONE - computing textual embeddings (1, 768)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "1it [00:00, 2131.25it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DONE - computing similarity vectors \n",
      "DONE - concatenating \n",
      "X shape (1, 953)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 39/39 [00:01<00:00, 33.26it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(40, 4)\n",
      "DONE - computing textual embeddings (1, 768)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "1it [00:00, 2160.90it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DONE - computing similarity vectors \n",
      "DONE - concatenating \n",
      "X shape (1, 953)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 39/39 [00:01<00:00, 35.18it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(40, 4)\n"
     ]
    }
   ],
   "source": [
    "import sys\n",
    "# Launch the study UI: one dataset (\"data\"), 1 sample per dataset plus\n",
    "# 3 free-text interaction samples, reusing the already-loaded model.\n",
    "user_study = User_study_interface([data], dis_names=[\"data_pred\"],samples_per_data=1, interaction_samples=3, model=model)\n",
    "\n",
    "# user_study = User_study_interface([data,data_pred, data_no_language,data_2d, data_opposit], dis_names=[\"Ground Truth\", \"Ours\", \"No_language\",\"2D_only\",\"GT opposit\"],samples_per_data=5, interaction_samples=5,model=model)\n",
    "# user_study.replay_from_file(\"\")\n",
    "# plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Simple interactive demo - Franka"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 145,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[ 0.34151169 -0.5         0.05       -0.00724914]\n",
      "[ 0.34151169 -0.5         0.05       -0.00724914]\n",
      "[0.59380809 0.5        0.3498351  0.19161233]\n",
      "[0.59380809 0.5        0.3498351  0.19161233]\n",
      "[0.49476115 0.01200915 0.20811902 0.12201081]\n",
      "[0.49476115 0.01200915 0.20811902 0.12201081]\n",
      "[-0.3 -0.3 -0.3 -0.3]\n",
      "[0.3        0.08472734 0.23990106 0.3       ]\n",
      "[ 0.0023526  -0.01801942  0.1294534   0.07826087]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "press m\n",
      "press o\n",
      "press v\n",
      "press e\n",
      "press  \n",
      "press s\n",
      "press l\n",
      "press o\n",
      "press w\n",
      "press e\n",
      "press r\n",
      "press enter\n",
      "DONE - computing textual embeddings (1, 768)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "1it [00:00, 147.58it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DONE - computing similarity vectors \n",
      "DONE - concatenating \n",
      "X shape (1, 953)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 39/39 [00:01<00:00, 21.53it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(40, 4)\n",
      "press left\n",
      "press left\n",
      "press left\n",
      "press left\n",
      "press left\n",
      "press left\n",
      "press m\n",
      "press u\n",
      "press c\n",
      "press h\n",
      "press  \n",
      "press enter\n",
      "DONE - computing textual embeddings (1, 768)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "1it [00:00, 168.12it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DONE - computing similarity vectors \n",
      "DONE - concatenating \n",
      "X shape (1, 953)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 39/39 [00:01<00:00, 22.18it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(40, 4)\n",
      "DONE - computing textual embeddings (1, 768)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "1it [00:00, 178.73it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DONE - computing similarity vectors \n",
      "DONE - concatenating \n",
      "X shape (1, 953)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 39/39 [00:01<00:00, 21.50it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(40, 4)\n",
      "press backspace\n",
      "press backspace\n",
      "press backspace\n",
      "press backspace\n",
      "press backspace\n",
      "press backspace\n",
      "press f\n",
      "press a\n",
      "press y\n",
      "press backspace\n",
      "press s\n",
      "press t\n",
      "press e\n",
      "press r\n",
      "press enter\n",
      "DONE - computing textual embeddings (1, 768)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "1it [00:00, 163.43it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DONE - computing similarity vectors \n",
      "DONE - concatenating \n",
      "X shape (1, 953)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 39/39 [00:01<00:00, 21.39it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(40, 4)\n"
     ]
    }
   ],
   "source": [
    "from scipy.spatial.transform import Rotation as R\n",
    "\n",
    "plt.close(\"all\")\n",
    "# default initial trajectory traj\n",
    "# Object positions as (x, y, z, extra-channel) rows; an offset is applied below.\n",
    "obj_poses = np.array([[0.1,-0.5,-0.10,0],[0.2,-0.8,0.0,0],[0.2,-0.5,-0.10,0],[0.6,-0.5,-0.60,0],[0.2,-0.5,-0.10,0],[0.2,-0.5,-0.10,0]])\n",
    "obj_poses_offset = np.array([0.0, 0.0,0,0])\n",
    "\n",
    "obj_poses = obj_poses+ obj_poses_offset\n",
    "\n",
    "# Base waypoints: x, y, z plus a per-waypoint speed value in the 4th column.\n",
    "base_wp = np.array([[0,0,0,0.0],[0.2,-0.25,0.2,0.1],[0.2,-0.5,0.3,0.2],[0.1,-0.75,0.2,0.2],[0.0,-0.8,0.1,0.1],[0.0,-1.0,0.0,0.0]])\n",
    "offset = np.array([0.0, 0.0,0.0,0])\n",
    "base_wp = base_wp + offset\n",
    "base_wp = base_wp / 1.0\n",
    "\n",
    "# Shift both waypoints and objects by the same initial pose so they stay aligned.\n",
    "init_pose =  np.array([0.382723920642, 0.5, 0.05,0])\n",
    "base_wp = base_wp + init_pose\n",
    "obj_poses = obj_poses+ init_pose\n",
    "\n",
    "obj_names = [\"table\"]\n",
    "# text = \"keep a bigger distance from the actor\" #distance\n",
    "# text = \"go to the bottom\"                      #cartesian\n",
    "# text = \"fly slower when next to the table\"       #speed\n",
    "# text = \"stay furthe away from the cup\"       #speed\n",
    "# Empty instruction: the user types one interactively in the demo.\n",
    "text = \" \"       #speed\n",
    "\n",
    "\n",
    "def interpolate_traj(wps,traj_n=40, offset=(0,0,0,0)):\n",
    "    \"\"\"Upsample waypoints into a smooth trajectory of `traj_n` points.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    wps : (N, 4) array\n",
    "        Waypoints as x, y, z plus a 4th channel (used here as speed).\n",
    "    traj_n : int\n",
    "        Number of interpolated samples to return.\n",
    "    offset : sequence of 4 floats\n",
    "        Added to every interpolated point. (Default changed from a mutable\n",
    "        list to an equivalent tuple -- same values, safer idiom.)\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    (traj_n, 4) array of interpolated x, y, z, speed.\n",
    "    \"\"\"\n",
    "    # Exact-fit (s=0) parametric spline through the xyz waypoints.\n",
    "    tck_xyz, _ = interpolate.splprep([wps[:,0],wps[:,1],wps[:,2]], s=0)\n",
    "    xint,yint,zint = interpolate.splev(np.linspace(0, 1, traj_n), tck_xyz)\n",
    "\n",
    "    # Separate (default-smoothed) spline over the 4th channel vs. arc position.\n",
    "    tck_vel, _ = interpolate.splprep([np.linspace(0,1,len(wps[:,3])), wps[:,3]])\n",
    "    _, velint = interpolate.splev(np.linspace(0, 1, traj_n), tck_vel)\n",
    "\n",
    "    traj = np.stack([xint,yint,zint,velint],axis=1)+offset\n",
    "    return traj\n",
    "\n",
    "def norm_traj_and_objs(t, o, margin=0.40, rotation_degrees = 0, rotation_axis = np.array([0, 0, 1])):\n",
    "    \"\"\"Rotate and normalize a trajectory and object poses into a padded unit box.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    t : (N, 4) array\n",
    "        Trajectory rows of x, y, z, velocity.\n",
    "    o : (M, 4) array\n",
    "        Object poses in the same layout.\n",
    "    margin : float\n",
    "        Fraction of the unit box kept as padding around the data.\n",
    "    rotation_degrees, rotation_axis\n",
    "        Rotation applied to the xyz part before normalization.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    (t_new, o_new, factor_list) -- factor_list carries everything rescale()\n",
    "    needs to invert the transform.\n",
    "    \"\"\"\n",
    "\n",
    "    rotation_radians = np.radians(rotation_degrees)\n",
    "    rotation_vector = rotation_radians * rotation_axis\n",
    "    rotation = R.from_rotvec(rotation_vector)\n",
    "\n",
    "    # Stack objects first, trajectory second, and normalize them *jointly* so\n",
    "    # both end up in the same frame.\n",
    "    pts_ = np.concatenate([o,t])\n",
    "\n",
    "    vel = pts_[:,3:]\n",
    "    pts = pts_[:,:3]\n",
    "    pts = rotation.apply(pts)\n",
    "\n",
    "    vel_min = np.min(vel,axis = 0)\n",
    "    vel_max = np.max(vel,axis = 0)\n",
    "    vel_norm = np.max(np.abs(vel_max-vel_min))\n",
    "\n",
    "    # Map velocity into [-0.5+margin/2, 0.5-margin/2]; skip the scaling when\n",
    "    # the velocity range is degenerate to avoid dividing by ~0.\n",
    "    if vel_norm > 1e-10:\n",
    "        vel = ((vel-vel_min)/vel_norm)*(1-margin)+margin/2-0.5 # old\n",
    "        # vel = ((vel-(vel_max-vel_min)/2)/vel_norm)*(1-margin)\n",
    "\n",
    "    else:\n",
    "        vel = vel-vel_min\n",
    "\n",
    "    pts_min = np.min(pts,axis = 0)\n",
    "    pts_max = np.max(pts,axis = 0)\n",
    "    pts_norm = np.max(np.abs(pts_max-pts_min))\n",
    "\n",
    "    # Same mapping for xyz (no degenerate-range guard here).\n",
    "    pts  = ((pts-pts_min)/pts_norm)*(1-margin)+margin/2-0.5 # old\n",
    "    # pts  = ((pts-(pts_max-pts_min)/2)/pts_norm)*(1-margin)\n",
    "\n",
    "\n",
    "    pts_new= np.concatenate([pts,vel],axis=-1)\n",
    "    # Split back out: objects were stacked first, trajectory second.\n",
    "    o_new = pts_new[:o.shape[0],:]\n",
    "    t_new = pts_new[o.shape[0]:,:]\n",
    "\n",
    "    return t_new, o_new, [pts_norm, pts_min,vel_norm, vel_min, margin,  rotation_degrees,  rotation_axis]\n",
    "    # return t_new, o_new, [pts_norm, (pts_max-pts_min)/2,vel_norm, (vel_max-vel_min)/2, margin]\n",
    "\n",
    "def rescale(pts_, factor_list):\n",
    "    \"\"\"Invert norm_traj_and_objs(): de-normalize xyz/velocity, then rotate back.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    pts_ : (N, 4) array\n",
    "        Points in the normalized frame (x, y, z, velocity).\n",
    "    factor_list : list\n",
    "        The factors returned by norm_traj_and_objs().\n",
    "    \"\"\"\n",
    "\n",
    "    vel = pts_[:,3:]\n",
    "    pts = pts_[:,:3]\n",
    "\n",
    "    pts_norm, pts_min,vel_norm, vel_min, margin,  rotation_degrees,  rotation_axis = factor_list #old\n",
    "    # pts_norm, pts_avr,vel_norm, vel_avr, margin = factor_list\n",
    "    # Inverse rotation: negate the angle that was used during normalization.\n",
    "    rotation_radians = np.radians(rotation_degrees)\n",
    "    rotation_vector = -rotation_radians * rotation_axis\n",
    "    rotation = R.from_rotvec(rotation_vector)\n",
    "\n",
    "    # Undo the padded [-0.5, 0.5] mapping, mirroring the degenerate-velocity case.\n",
    "    pts = (pts+0.5-margin/2)/(1-margin)*pts_norm+pts_min # old\n",
    "    if vel_norm > 1e-10:\n",
    "        vel = (vel+0.5-margin/2)/(1-margin)*vel_norm+vel_min# old\n",
    "    else:\n",
    "        vel = vel+vel_min\n",
    "\n",
    "    # pts = pts/(1-margin)*pts_norm+pts_avr\n",
    "    # vel = vel/(1-margin)*vel_norm+vel_avr\n",
    "    pts = rotation.apply(pts)\n",
    "\n",
    "    pts_new= np.concatenate([pts,vel],axis=-1)\n",
    "\n",
    "    return pts_new\n",
    "\n",
    "\n",
    "# NOTE(review): `traj_n` is not defined in this cell -- it must come from an\n",
    "# earlier cell; confirm before running this cell standalone.\n",
    "traj = interpolate_traj(base_wp,traj_n=traj_n)\n",
    "\n",
    "# Normalize into the model's working frame (rotated -90 deg about z).\n",
    "traj_, obj_poses_, factor_list = norm_traj_and_objs(traj, obj_poses, rotation_degrees = -90)\n",
    "\n",
    "obj_poses_ = obj_poses_[:,:3]\n",
    "\n",
    "# Round-trip check: rescale() should map the normalized trajectory back to the original.\n",
    "traj_new = rescale(traj_, factor_list)\n",
    "\n",
    "d = np2data(traj_, obj_names, obj_poses_, text, locality_factor=0.5)[0]\n",
    "\n",
    "\n",
    "# Compare min/max/average of the original vs. round-tripped trajectory (should match).\n",
    "print(np.min(traj,axis=0))\n",
    "print(np.min(traj_new,axis=0))\n",
    "\n",
    "print(np.max(traj,axis=0))\n",
    "print(np.max(traj_new,axis=0))\n",
    "\n",
    "print(np.average(traj,axis=0))\n",
    "print(np.average(traj_new,axis=0))\n",
    "\n",
    "\n",
    "# Range sanity-check of the packed sample (trajectory + padded object poses).\n",
    "traj_objs = np.concatenate([d[\"input_traj\"],pad_array( d[\"obj_poses\"],4,axis=-1)])\n",
    "print(np.min(traj_objs,axis=0))\n",
    "print(np.max(traj_objs,axis=0))\n",
    "print(np.average(traj_objs,axis=0))\n",
    "\n",
    "\n",
    "d[\"input_traj\"] = d[\"input_traj\"].tolist()\n",
    "# NOTE(review): output_traj aliases the *same* list object as input_traj;\n",
    "# mutating one mutates the other.\n",
    "d[\"output_traj\"] = d[\"input_traj\"]\n",
    "\n",
    "# d = data[10]\n",
    "\n",
    "\n",
    "# for d_ in data[:10]:\n",
    "#     traj_objs = np.concatenate([d_[\"input_traj\"],pad_array(np.array(d_[\"obj_poses\"]),4,axis=-1)])\n",
    "#     print(np.min(traj_objs,axis=0))\n",
    "#     print(np.max(traj_objs,axis=0))\n",
    "#     print(np.average(traj_objs,axis=0))\n",
    "#     print(\"-----------------------------\")\n",
    "\n",
    "\n",
    "# data_new = []\n",
    "# data_new.append({\"input_traj\": traj.tolist(), \"output_traj\": traj.tolist(), \"text\": text, \"obj_names\": obj_names,\n",
    "#                 \"obj_poses\": obj_poses,\"locality_factor\": 0.5,\"image_paths\":None, \"change_type\":None})\n",
    "\n",
    "# Dev-mode run on the single synthetic sample built above (10 interaction samples).\n",
    "# user_study = User_study_interface([data[:1]], dis_names=[\"data_pred\"],samples_per_data=0, interaction_samples=1, model=model)\n",
    "user_study = User_study_interface([[d]], dis_names=[\"data_pred\"],samples_per_data=0, interaction_samples=10, model=model, dev_mode=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Replay user-study data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Restore and replay a previously recorded user-study session from its JSON log.\n",
    "user_study.replay_from_file(\"Sabrinaaaaaaaaaaaaaaaaaa_25_20220915-224920.json\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### visualize results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'Ground Truth': {'yes, much better': 14, 'yes, a bit better': 21, 'same': 10, 'No, a bit wrong': 4, 'No, totally wrong': 1}, 'Ours': {'yes, much better': 11, 'yes, a bit better': 22, 'No, totally wrong': 1, 'same': 13, 'No, a bit wrong': 3}, 'No_language': {'yes, much better': 12, 'No, a bit wrong': 13, 'yes, a bit better': 5, 'No, totally wrong': 17, 'same': 3}, '2D_only': {'yes, a bit better': 13, 'same': 6, 'No, totally wrong': 9, 'yes, much better': 9, 'No, a bit wrong': 13}, 'GT opposit': {'No, totally wrong': 15, 'yes, a bit better': 4, 'No, a bit wrong': 22, 'same': 5, 'yes, much better': 4}, 'Ours w/ user text': {'yes, a bit better': 18, 'No, a bit wrong': 8, 'yes, much better': 5, 'same': 15, 'No, totally wrong': 4}}\n",
      "\n",
      " ['go further away from combination lock', 'go much closer to Shih-Tzu', 'go to the bottom', 'go to oxygen mask', 'go much faster when passing snowmobile', 'go slower while passing plate', 'stay bottom', 'a lot faster', 'go slower when reaching Lakeland terrier', 'top', 'stay closer to snake fence', 'go faster water ox', 'go further away from pier', 'go closer seashore', 'go closer to ski mask', 'Hit the tabby', 'Surround the knee pad', 'go faster to hit the bottom', 'go slower and avoid the space bar', 'Go to the coon bear', 'go faster when passing near the kite', 'go really further away from lacewing fly', 'go a bit slower around palace', 'go to bottom ', 'slow down near abacus', 'move close the hot pot', 'front', 'move far away sea snake', 'lower', 'move upper', 'go down', 'go closer to tray', 'go slower near the knot', 'go faster near palace', 'go down', 'go to the limo', 'do not pass the chiffonier', 'slow down when passing blue jack', 'go to salt shaker', 'go to envelope', 'Pass next  to the power grill', 'pass nearer to the harvestman', 'Get nearer to the right', 'pass in between roundabout and power grill', 'Walk nearer to the bottom', 'move closer to the head', 'stay left', 'move faster', 'use direct way', 'avoid trike']\n",
      "Ground Truth {'yes, much better': 14, 'yes, a bit better': 21, 'same': 10, 'No, a bit wrong': 4, 'No, totally wrong': 1} \n",
      "\n",
      "Ours {'yes, much better': 11, 'yes, a bit better': 22, 'No, totally wrong': 1, 'same': 13, 'No, a bit wrong': 3} \n",
      "\n",
      "GT opposit {'No, totally wrong': 15, 'yes, a bit better': 4, 'No, a bit wrong': 22, 'same': 5, 'yes, much better': 4} \n",
      "\n",
      "No_language {'yes, much better': 12, 'No, a bit wrong': 13, 'yes, a bit better': 5, 'No, totally wrong': 17, 'same': 3} \n",
      "\n",
      "2D_only {'yes, a bit better': 13, 'same': 6, 'No, totally wrong': 9, 'yes, much better': 9, 'No, a bit wrong': 13} \n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Aggregate per-user answers from the user-study JSON files and plot the\n",
    "# answer distribution for each compared method.\n",
    "\n",
    "dis_names = [\"Ground Truth\", \"Ours\", \"No_language\", \"2D_only\", \"GT opposit\", \"Ours w/ user text\"]\n",
    "answers = ['yes, much better', 'yes, a bit better', 'same', 'No, a bit wrong', 'No, totally wrong']\n",
    "\n",
    "# Each user answers this many questions per method; the last\n",
    "# QUESTIONS_PER_METHOD indices of exp_data_indices always belong to\n",
    "# \"Ours w/ user text\".\n",
    "QUESTIONS_PER_METHOD = 5\n",
    "\n",
    "text_interactions = []\n",
    "total = {d_n: {} for d_n in dis_names}  # method -> answer -> count\n",
    "num_users = 0\n",
    "for user in os.listdir(user_study_folder):\n",
    "    if 'Sabrina' in user:  # excluded participant\n",
    "        continue\n",
    "    user_file = os.path.join(user_study_folder, user)\n",
    "    with open(user_file, 'r', encoding='utf-8') as f:\n",
    "        u = json.load(f)\n",
    "\n",
    "    for i, dis_i in enumerate(u[\"exp_data_indices\"]):\n",
    "        if i < len(u[\"exp_data_indices\"]) - QUESTIONS_PER_METHOD:\n",
    "            d_n = dis_names[dis_i]\n",
    "        else:\n",
    "            d_n = dis_names[-1]  # trailing questions are \"Ours w/ user text\"\n",
    "        a = answers[int(u[\"user_answers\"][str(i + 1)])]\n",
    "        total[d_n][a] = total[d_n].get(a, 0) + 1\n",
    "\n",
    "    text_interactions = text_interactions + list(u['interaction_text'].values())\n",
    "    num_users += 1\n",
    "\n",
    "print(total)\n",
    "print(\"\\n\", text_interactions)\n",
    "plot_dis_names = [\"Ground Truth\", \"Ours\", \"GT opposit\", \"No_language\", \"2D_only\"]\n",
    "plot_dis_names_legend = [\"Ground Truth\", \"Ours\", \"Ground Fake\", \"No language\", \"Projected 2D model\"]\n",
    "\n",
    "for d_n in plot_dis_names:\n",
    "    print(d_n, total[d_n], \"\\n\")\n",
    "\n",
    "barWidth = 0.15\n",
    "fig, ax = plt.subplots(figsize=(12, 8))  # plt.subplots returns (fig, ax)\n",
    "\n",
    "ax.grid(axis='y', linewidth=0.5)\n",
    "br = [np.arange(len(plot_dis_names))]  # bar x-positions, one list per answer\n",
    "\n",
    "colors = [\"#4f963b\", \"#839642\", \"#f5b342\", \"#8a4f0b\", \"#8c152d\"]\n",
    "for i, a in enumerate(answers):\n",
    "    # .get(a, 0): a method may never have received a particular answer.\n",
    "    v = [total[d_n].get(a, 0) * 100 / (QUESTIONS_PER_METHOD * num_users) for d_n in plot_dis_names]\n",
    "    ax.bar(br[i], v, color=colors[i], width=barWidth,\n",
    "           edgecolor='grey', label=a)\n",
    "    br.append([x + barWidth for x in br[i]])\n",
    "\n",
    "ax.set_xlabel('', fontsize=15)\n",
    "ax.set_ylabel('percentage of answers  [%]', fontsize=15)\n",
    "# Center the tick labels under each 5-bar group (middle bar sits at 2*barWidth).\n",
    "ax.set_xticks([r + 2 * barWidth for r in range(len(plot_dis_names))])\n",
    "ax.set_xticklabels(plot_dis_names_legend)\n",
    "ax.legend()\n",
    "plt.show()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "10\n",
      "Ours \t 66.00 \t 26.00 \t 8.00\n",
      "Ours w/ user text \t 46.00 \t 30.00 \t 24.00\n"
     ]
    }
   ],
   "source": [
    "# Collapse the 5-way answer scale into better / same / worse percentages\n",
    "# for \"Ours\" and \"Ours w/ user text\".\n",
    "print(num_users)\n",
    "for dis in [dis_names[1], dis_names[-1]]:\n",
    "    counts = total[dis]\n",
    "    n_answers = 5 * num_users  # 5 questions per method per user\n",
    "\n",
    "    # .get(..., 0): an answer category may be absent for a method.\n",
    "    better = (counts.get('yes, a bit better', 0) + counts.get('yes, much better', 0)) * 100.0 / n_answers\n",
    "    same = counts.get('same', 0) * 100.0 / n_answers\n",
    "    worse = (counts.get('No, a bit wrong', 0) + counts.get('No, totally wrong', 0)) * 100.0 / n_answers\n",
    "\n",
    "    print(dis, \"\\t {:.2f} \\t {:.2f} \\t {:.2f}\".format(better, same, worse))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1929\n",
      "99\n",
      "dict_keys(['go', 'further', 'away', 'from', 'combination', 'lock', 'much', 'closer', 'to', 'Shih-Tzu', 'the', 'bottom', 'oxygen', 'mask', 'faster', 'when', 'passing', 'snowmobile', 'slower', 'while', 'plate', 'stay', 'a', 'lot', 'reaching', 'Lakeland', 'terrier', 'top', 'snake', 'fence', 'water', 'ox', 'pier', 'seashore', 'ski', 'Hit', 'tabby', 'Surround', 'knee', 'pad', 'hit', 'and', 'avoid', 'space', 'bar', 'Go', 'coon', 'bear', 'near', 'kite', 'really', 'lacewing', 'fly', 'bit', 'around', 'palace', '', 'slow', 'down', 'abacus', 'move', 'close', 'hot', 'pot', 'front', 'far', 'sea', 'lower', 'upper', 'tray', 'knot', 'limo', 'do', 'not', 'pass', 'chiffonier', 'blue', 'jack', 'salt', 'shaker', 'envelope', 'Pass', 'next', 'power', 'grill', 'nearer', 'harvestman', 'Get', 'right', 'in', 'between', 'roundabout', 'Walk', 'head', 'left', 'use', 'direct', 'way', 'trike'])\n",
      "23\n",
      "dict_keys(['reaching', 'Hit', 'Surround', 'hit', 'avoid', 'Go', 'really', 'around', 'slow', 'move', 'far', 'lower', 'do', 'not', 'Pass', 'grill', 'nearer', 'Get', 'between', 'Walk', 'use', 'direct', 'way'])\n",
      "0.23232323232323232\n",
      "24\n",
      "0.48\n",
      "['go further away from combination lock', 'go much closer to Shih-Tzu', 'go to the bottom', 'go to oxygen mask', 'go much faster when passing snowmobile', 'go slower while passing plate', 'stay bottom', 'a lot faster', 'go slower when reaching Lakeland terrier', 'top', 'stay closer to snake fence', 'go faster water ox', 'go further away from pier', 'go closer seashore', 'go closer to ski mask', 'Hit the tabby', 'Surround the knee pad', 'go faster to hit the bottom', 'go slower and avoid the space bar', 'Go to the coon bear', 'go faster when passing near the kite', 'go really further away from lacewing fly', 'go a bit slower around palace', 'go to bottom ', 'slow down near abacus', 'move close the hot pot', 'front', 'move far away sea snake', 'lower', 'move upper', 'go down', 'go closer to tray', 'go slower near the knot', 'go faster near palace', 'go down', 'go to the limo', 'do not pass the chiffonier', 'slow down when passing blue jack', 'go to salt shaker', 'go to envelope', 'Pass next  to the power grill', 'pass nearer to the harvestman', 'Get nearer to the right', 'pass in between roundabout and power grill', 'Walk nearer to the bottom', 'move closer to the head', 'stay left', 'move faster', 'use direct way', 'avoid trike']\n"
     ]
    }
   ],
   "source": [
    "# Compare the vocabulary of the dataset descriptions (`data`) against the\n",
    "# free-form text users typed during the study (`text_interactions`).\n",
    "# Dicts with dummy values serve as insertion-ordered sets, so the printed\n",
    "# dict_keys views keep first-occurrence order.\n",
    "unique_words = dict.fromkeys((w for d in data for w in d[\"text\"].split(\" \")), 1)\n",
    "print(len(unique_words.keys()))\n",
    "\n",
    "user_unique_words = dict.fromkeys((w for t in text_interactions for w in t.split(\" \")), 1)\n",
    "print(len(user_unique_words.keys()))\n",
    "print(user_unique_words.keys())\n",
    "\n",
    "# Words users typed that never appear in the dataset descriptions.\n",
    "out_of_dis = dict.fromkeys((w for w in user_unique_words if w not in unique_words), 1)\n",
    "print(len(out_of_dis.keys()))\n",
    "print(out_of_dis.keys())\n",
    "\n",
    "print(len(out_of_dis.keys()) / float(len(user_unique_words.keys())))\n",
    "\n",
    "# Interactions that contain at least one out-of-distribution word.\n",
    "samples_out_of_dis = [t for t in text_interactions\n",
    "                      if any(w in out_of_dis for w in t.split(\" \"))]\n",
    "\n",
    "print(len(samples_out_of_dis))\n",
    "print(len(samples_out_of_dis) / float(len(text_interactions)))\n",
    "print(text_interactions)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.8.13 ('py38_cu11_2')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.2"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "5de91200c9f9e1f8a0c28ceba668014be0fd55838e84400e0a7ad1d269192773"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
