sia_tp_sample / 12dmodel__deep_motion_mag.jsonl
{"nwo":"12dmodel\/deep_motion_mag","sha":"485243bd7428d08059c313321b5e6ebfd7f61991","path":"preprocessor.py","language":"python","identifier":"get_possion_noise","parameters":"(image)","argument_list":"","return_statement":"return tf.multiply(n, n_str)","docstring":"Add poisson noise.\n\n This function approximate posson noise upto 2nd order.\n Assume images were in 0-255, and converted to the range of -1 to 1.","docstring_summary":"Add poisson noise.","docstring_tokens":["Add","poisson","noise","."],"function":"def get_possion_noise(image):\n \"\"\"Add poisson noise.\n\n This function approximate posson noise upto 2nd order.\n Assume images were in 0-255, and converted to the range of -1 to 1.\n \"\"\"\n n = tf.random_normal(shape=tf.shape(image), mean=0.0, stddev=1.0)\n # strength ~ sqrt image value in 255, divided by 127.5 to convert\n # back to -1, 1 range.\n n_str = tf.sqrt(image + 1.0) \/ np.sqrt(127.5)\n return tf.multiply(n, n_str)","function_tokens":["def","get_possion_noise","(","image",")",":","n","=","tf",".","random_normal","(","shape","=","tf",".","shape","(","image",")",",","mean","=","0.0",",","stddev","=","1.0",")","# strength ~ sqrt image value in 255, divided by 127.5 to convert","# back to -1, 1 range.","n_str","=","tf",".","sqrt","(","image","+","1.0",")","\/","np",".","sqrt","(","127.5",")","return","tf",".","multiply","(","n",",","n_str",")"],"url":"https:\/\/github.com\/12dmodel\/deep_motion_mag\/blob\/485243bd7428d08059c313321b5e6ebfd7f61991\/preprocessor.py#L16-L26"}
{"nwo":"12dmodel\/deep_motion_mag","sha":"485243bd7428d08059c313321b5e6ebfd7f61991","path":"ops.py","language":"python","identifier":"expand_dims_1_to_4","parameters":"(tensor, dims=None)","argument_list":"","return_statement":"return tf.expand_dims(\n tf.expand_dims(\n tf.expand_dims(tensor, dims[0]),\n dims[1]),\n dims[2])","docstring":"Expand dimension from 1 to 4.\n\n Useful for multiplying amplification factor.","docstring_summary":"Expand dimension from 1 to 4.","docstring_tokens":["Expand","dimension","from","1","to","4","."],"function":"def expand_dims_1_to_4(tensor, dims=None):\n \"\"\"Expand dimension from 1 to 4.\n\n Useful for multiplying amplification factor.\n \"\"\"\n if not dims:\n dims = [-1, -1, -1]\n return tf.expand_dims(\n tf.expand_dims(\n tf.expand_dims(tensor, dims[0]),\n dims[1]),\n dims[2])","function_tokens":["def","expand_dims_1_to_4","(","tensor",",","dims","=","None",")",":","if","not","dims",":","dims","=","[","-","1",",","-","1",",","-","1","]","return","tf",".","expand_dims","(","tf",".","expand_dims","(","tf",".","expand_dims","(","tensor",",","dims","[","0","]",")",",","dims","[","1","]",")",",","dims","[","2","]",")"],"url":"https:\/\/github.com\/12dmodel\/deep_motion_mag\/blob\/485243bd7428d08059c313321b5e6ebfd7f61991\/ops.py#L77-L88"}
{"nwo":"12dmodel\/deep_motion_mag","sha":"485243bd7428d08059c313321b5e6ebfd7f61991","path":"magnet.py","language":"python","identifier":"MagNet3Frames.setup_for_inference","parameters":"(self, checkpoint_dir, image_width, image_height)","argument_list":"","return_statement":"","docstring":"Setup model for inference.\n\n Build computation graph, initialize variables, and load checkpoint.","docstring_summary":"Setup model for inference.","docstring_tokens":["Setup","model","for","inference","."],"function":"def setup_for_inference(self, checkpoint_dir, image_width, image_height):\n \"\"\"Setup model for inference.\n\n Build computation graph, initialize variables, and load checkpoint.\n \"\"\"\n self.image_width = image_width\n self.image_height = image_height\n # Figure out image dimension\n self._build_feed_model()\n ginit_op = tf.global_variables_initializer()\n linit_op = tf.local_variables_initializer()\n self.sess.run([ginit_op, linit_op])\n\n if self.load(checkpoint_dir):\n print(\"[*] Load Success\")\n else:\n raise RuntimeError('MagNet: Failed to load checkpoint file.')\n self.is_graph_built = True","function_tokens":["def","setup_for_inference","(","self",",","checkpoint_dir",",","image_width",",","image_height",")",":","self",".","image_width","=","image_width","self",".","image_height","=","image_height","# Figure out image dimension","self",".","_build_feed_model","(",")","ginit_op","=","tf",".","global_variables_initializer","(",")","linit_op","=","tf",".","local_variables_initializer","(",")","self",".","sess",".","run","(","[","ginit_op",",","linit_op","]",")","if","self",".","load","(","checkpoint_dir",")",":","print","(","\"[*] Load Success\"",")","else",":","raise","RuntimeError","(","'MagNet: Failed to load checkpoint file.'",")","self",".","is_graph_built","=","True"],"url":"https:\/\/github.com\/12dmodel\/deep_motion_mag\/blob\/485243bd7428d08059c313321b5e6ebfd7f61991\/magnet.py#L198-L215"}
{"nwo":"12dmodel\/deep_motion_mag","sha":"485243bd7428d08059c313321b5e6ebfd7f61991","path":"magnet.py","language":"python","identifier":"MagNet3Frames.inference","parameters":"(self, frameA, frameB, amplification_factor)","argument_list":"","return_statement":"return out_amp","docstring":"Run Magnification on two frames.\n\n Args:\n frameA: path to first frame\n frameB: path to second frame\n amplification_factor: float for amplification factor","docstring_summary":"Run Magnification on two frames.","docstring_tokens":["Run","Magnification","on","two","frames","."],"function":"def inference(self, frameA, frameB, amplification_factor):\n \"\"\"Run Magnification on two frames.\n\n Args:\n frameA: path to first frame\n frameB: path to second frame\n amplification_factor: float for amplification factor\n \"\"\"\n in_frames = [load_train_data([frameA, frameB, frameB],\n gray_scale=self.n_channels==1, is_testing=True)]\n in_frames = np.array(in_frames).astype(np.float32)\n\n out_amp = self.sess.run(self.test_output,\n feed_dict={self.test_input: in_frames,\n self.test_amplification_factor:\n [amplification_factor]})\n return out_amp","function_tokens":["def","inference","(","self",",","frameA",",","frameB",",","amplification_factor",")",":","in_frames","=","[","load_train_data","(","[","frameA",",","frameB",",","frameB","]",",","gray_scale","=","self",".","n_channels","==","1",",","is_testing","=","True",")","]","in_frames","=","np",".","array","(","in_frames",")",".","astype","(","np",".","float32",")","out_amp","=","self",".","sess",".","run","(","self",".","test_output",",","feed_dict","=","{","self",".","test_input",":","in_frames",",","self",".","test_amplification_factor",":","[","amplification_factor","]","}",")","return","out_amp"],"url":"https:\/\/github.com\/12dmodel\/deep_motion_mag\/blob\/485243bd7428d08059c313321b5e6ebfd7f61991\/magnet.py#L217-L233"}
{"nwo":"12dmodel\/deep_motion_mag","sha":"485243bd7428d08059c313321b5e6ebfd7f61991","path":"magnet.py","language":"python","identifier":"MagNet3Frames.run","parameters":"(self,\n checkpoint_dir,\n vid_dir,\n frame_ext,\n out_dir,\n amplification_factor,\n velocity_mag=False)","argument_list":"","return_statement":"","docstring":"Magnify a video in the two-frames mode.\n\n Args:\n checkpoint_dir: checkpoint directory.\n vid_dir: directory containing video frames videos are processed\n in sorted order.\n out_dir: directory to place output frames and resulting video.\n amplification_factor: the amplification factor,\n with 0 being no change.\n velocity_mag: if True, process video in Dynamic mode.","docstring_summary":"Magnify a video in the two-frames mode.","docstring_tokens":["Magnify","a","video","in","the","two","-","frames","mode","."],"function":"def run(self,\n checkpoint_dir,\n vid_dir,\n frame_ext,\n out_dir,\n amplification_factor,\n velocity_mag=False):\n \"\"\"Magnify a video in the two-frames mode.\n\n Args:\n checkpoint_dir: checkpoint directory.\n vid_dir: directory containing video frames videos are processed\n in sorted order.\n out_dir: directory to place output frames and resulting video.\n amplification_factor: the amplification factor,\n with 0 being no change.\n velocity_mag: if True, process video in Dynamic mode.\n \"\"\"\n vid_name = os.path.basename(out_dir)\n # make folder\n mkdir(out_dir)\n vid_frames = sorted(glob(os.path.join(vid_dir, '*.' + frame_ext)))\n first_frame = vid_frames[0]\n im = imread(first_frame)\n image_height, image_width = im.shape\n if not self.is_graph_built:\n self.setup_for_inference(checkpoint_dir, image_width, image_height)\n try:\n i = int(self.ckpt_name.split('-')[-1])\n print(\"Iteration number is {:d}\".format(i))\n vid_name = vid_name + '_' + str(i)\n except:\n print(\"Cannot get iteration number\")\n if velocity_mag:\n print(\"Running in Dynamic mode\")\n\n prev_frame = first_frame\n desc = vid_name if len(vid_name) < 10 else vid_name[:10]\n for frame in tqdm(vid_frames, desc=desc):\n file_name = os.path.basename(frame)\n out_amp = self.inference(prev_frame, frame, amplification_factor)\n\n im_path = os.path.join(out_dir, file_name)\n save_images(out_amp, [1, 1], im_path)\n if velocity_mag:\n prev_frame = frame\n\n # Try to combine it into a video\n call([DEFAULT_VIDEO_CONVERTER, '-y', '-f', 'image2', '-r', '30', '-i',\n os.path.join(out_dir, '%06d.png'), '-c:v', 'libx264',\n os.path.join(out_dir, vid_name + '.mp4')]\n )","function_tokens":["def","run","(","self",",","checkpoint_dir",",","vid_dir",",","frame_ext",",","out_dir",",","amplification_factor",",","velocity_mag","=","False",")",":","vid_name","=","os",".","path",".","basename","(","out_dir",")","# make folder","mkdir","(","out_dir",")","vid_frames","=","sorted","(","glob","(","os",".","path",".","join","(","vid_dir",",","'*.'","+","frame_ext",")",")",")","first_frame","=","vid_frames","[","0","]","im","=","imread","(","first_frame",")","image_height",",","image_width","=","im",".","shape","if","not","self",".","is_graph_built",":","self",".","setup_for_inference","(","checkpoint_dir",",","image_width",",","image_height",")","try",":","i","=","int","(","self",".","ckpt_name",".","split","(","'-'",")","[","-","1","]",")","print","(","\"Iteration number is {:d}\"",".","format","(","i",")",")","vid_name","=","vid_name","+","'_'","+","str","(","i",")","except",":","print","(","\"Cannot get iteration number\"",")","if","velocity_mag",":","print","(","\"Running in Dynamic 
mode\"",")","prev_frame","=","first_frame","desc","=","vid_name","if","len","(","vid_name",")","<","10","else","vid_name","[",":","10","]","for","frame","in","tqdm","(","vid_frames",",","desc","=","desc",")",":","file_name","=","os",".","path",".","basename","(","frame",")","out_amp","=","self",".","inference","(","prev_frame",",","frame",",","amplification_factor",")","im_path","=","os",".","path",".","join","(","out_dir",",","file_name",")","save_images","(","out_amp",",","[","1",",","1","]",",","im_path",")","if","velocity_mag",":","prev_frame","=","frame","# Try to combine it into a video","call","(","[","DEFAULT_VIDEO_CONVERTER",",","'-y'",",","'-f'",",","'image2'",",","'-r'",",","'30'",",","'-i'",",","os",".","path",".","join","(","out_dir",",","'%06d.png'",")",",","'-c:v'",",","'libx264'",",","os",".","path",".","join","(","out_dir",",","vid_name","+","'.mp4'",")","]",")"],"url":"https:\/\/github.com\/12dmodel\/deep_motion_mag\/blob\/485243bd7428d08059c313321b5e6ebfd7f61991\/magnet.py#L235-L286"}
{"nwo":"12dmodel\/deep_motion_mag","sha":"485243bd7428d08059c313321b5e6ebfd7f61991","path":"magnet.py","language":"python","identifier":"MagNet3Frames._build_IIR_filtering_graphs","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Assume a_0 = 1","docstring_summary":"Assume a_0 = 1","docstring_tokens":["Assume","a_0","=","1"],"function":"def _build_IIR_filtering_graphs(self):\n \"\"\"\n Assume a_0 = 1\n \"\"\"\n self.input_image = tf.placeholder(tf.float32,\n [1, self.image_height,\n self.image_width,\n self.n_channels],\n name='input_image')\n self.filtered_enc = tf.placeholder(tf.float32,\n [1, None, None,\n self.shape_dims],\n name='filtered_enc')\n self.out_texture_enc = tf.placeholder(tf.float32,\n [1, None, None,\n self.texture_dims],\n name='out_texture_enc')\n self.ref_shape_enc = tf.placeholder(tf.float32,\n [1, None, None,\n self.shape_dims],\n name='ref_shape_enc')\n self.amplification_factor = tf.placeholder(tf.float32, [None],\n name='amplification_factor')\n with tf.variable_scope('ynet_3frames'):\n with tf.variable_scope('encoder'):\n self.texture_enc, self.shape_rep = \\\n self._encoder(self.input_image)\n with tf.variable_scope('manipulator'):\n # set encoder a to zero because we do temporal filtering\n # instead of taking the difference.\n self.out_shape_enc = self.manipulator(0.0,\n self.filtered_enc,\n self.amplification_factor)\n self.out_shape_enc += self.ref_shape_enc - self.filtered_enc\n with tf.variable_scope('decoder'):\n self.output_image = tf.clip_by_value(\n self._decoder(self.out_texture_enc,\n self.out_shape_enc),\n -1.0, 1.0)\n\n self.saver = tf.train.Saver()","function_tokens":["def","_build_IIR_filtering_graphs","(","self",")",":","self",".","input_image","=","tf",".","placeholder","(","tf",".","float32",",","[","1",",","self",".","image_height",",","self",".","image_width",",","self",".","n_channels","]",",","name","=","'input_image'",")","self",".","filtered_enc","=","tf",".","placeholder","(","tf",".","float32",",","[","1",",","None",",","None",",","self",".","shape_dims","]",",","name","=","'filtered_enc'",")","self",".","out_texture_enc","=","tf",".","placeholder","(","tf",".","float32",",","[","1",",","None",",","None",",","self",".","texture_dims","]",",","name","=","'out_texture_enc'",")","self",".","ref_shape_enc","=","tf",".","placeholder","(","tf",".","float32",",","[","1",",","None",",","None",",","self",".","shape_dims","]",",","name","=","'ref_shape_enc'",")","self",".","amplification_factor","=","tf",".","placeholder","(","tf",".","float32",",","[","None","]",",","name","=","'amplification_factor'",")","with","tf",".","variable_scope","(","'ynet_3frames'",")",":","with","tf",".","variable_scope","(","'encoder'",")",":","self",".","texture_enc",",","self",".","shape_rep","=","self",".","_encoder","(","self",".","input_image",")","with","tf",".","variable_scope","(","'manipulator'",")",":","# set encoder a to zero because we do temporal filtering","# instead of taking the 
difference.","self",".","out_shape_enc","=","self",".","manipulator","(","0.0",",","self",".","filtered_enc",",","self",".","amplification_factor",")","self",".","out_shape_enc","+=","self",".","ref_shape_enc","-","self",".","filtered_enc","with","tf",".","variable_scope","(","'decoder'",")",":","self",".","output_image","=","tf",".","clip_by_value","(","self",".","_decoder","(","self",".","out_texture_enc",",","self",".","out_shape_enc",")",",","-","1.0",",","1.0",")","self",".","saver","=","tf",".","train",".","Saver","(",")"],"url":"https:\/\/github.com\/12dmodel\/deep_motion_mag\/blob\/485243bd7428d08059c313321b5e6ebfd7f61991\/magnet.py#L289-L329"}
{"nwo":"12dmodel\/deep_motion_mag","sha":"485243bd7428d08059c313321b5e6ebfd7f61991","path":"magnet.py","language":"python","identifier":"MagNet3Frames.run_temporal","parameters":"(self,\n checkpoint_dir,\n vid_dir,\n frame_ext,\n out_dir,\n amplification_factor,\n fl, fh, fs,\n n_filter_tap,\n filter_type)","argument_list":"","return_statement":"","docstring":"Magnify video with a temporal filter.\n\n Args:\n checkpoint_dir: checkpoint directory.\n vid_dir: directory containing video frames videos are processed\n in sorted order.\n out_dir: directory to place output frames and resulting video.\n amplification_factor: the amplification factor,\n with 0 being no change.\n fl: low cutoff frequency.\n fh: high cutoff frequency.\n fs: sampling rate of the video.\n n_filter_tap: number of filter tap to use.\n filter_type: Type of filter to use. Can be one of \"fir\",\n \"butter\", or \"differenceOfIIR\". For \"differenceOfIIR\",\n fl and fh specifies rl and rh coefficients as in Wadhwa et al.","docstring_summary":"Magnify video with a temporal filter.","docstring_tokens":["Magnify","video","with","a","temporal","filter","."],"function":"def run_temporal(self,\n checkpoint_dir,\n vid_dir,\n frame_ext,\n out_dir,\n amplification_factor,\n fl, fh, fs,\n n_filter_tap,\n filter_type):\n \"\"\"Magnify video with a temporal filter.\n\n Args:\n checkpoint_dir: checkpoint directory.\n vid_dir: directory containing video frames videos are processed\n in sorted order.\n out_dir: directory to place output frames and resulting video.\n amplification_factor: the amplification factor,\n with 0 being no change.\n fl: low cutoff frequency.\n fh: high cutoff frequency.\n fs: sampling rate of the video.\n n_filter_tap: number of filter tap to use.\n filter_type: Type of filter to use. Can be one of \"fir\",\n \"butter\", or \"differenceOfIIR\". For \"differenceOfIIR\",\n fl and fh specifies rl and rh coefficients as in Wadhwa et al.\n \"\"\"\n\n nyq = fs \/ 2.0\n if filter_type == 'fir':\n filter_b = firwin(n_filter_tap, [fl, fh], nyq=nyq, pass_zero=False)\n filter_a = []\n elif filter_type == 'butter':\n filter_b, filter_a = butter(n_filter_tap, [fl\/nyq, fh\/nyq],\n btype='bandpass')\n filter_a = filter_a[1:]\n elif filter_type == 'differenceOfIIR':\n # This is a copy of what Neal did. Number of taps are ignored.\n # Treat fl and fh as rl and rh as in Wadhwa's code.\n # Write down the difference of difference equation in Fourier\n # domain to proof this:\n filter_b = [fh - fl, fl - fh]\n filter_a = [-1.0*(2.0 - fh - fl), (1.0 - fl) * (1.0 - fh)]\n else:\n raise ValueError('Filter type must be either '\n '[\"fir\", \"butter\", \"differenceOfIIR\"] got ' + \\\n filter_type)\n head, tail = os.path.split(out_dir)\n tail = tail + '_fl{}_fh{}_fs{}_n{}_{}'.format(fl, fh, fs,\n n_filter_tap,\n filter_type)\n out_dir = os.path.join(head, tail)\n vid_name = os.path.basename(out_dir)\n # make folder\n mkdir(out_dir)\n vid_frames = sorted(glob(os.path.join(vid_dir, '*.' 
+ frame_ext)))\n first_frame = vid_frames[0]\n im = imread(first_frame)\n image_height, image_width = im.shape\n if not self.is_graph_built:\n self.image_width = image_width\n self.image_height = image_height\n # Figure out image dimension\n self._build_IIR_filtering_graphs()\n ginit_op = tf.global_variables_initializer()\n linit_op = tf.local_variables_initializer()\n self.sess.run([ginit_op, linit_op])\n\n if self.load(checkpoint_dir):\n print(\"[*] Load Success\")\n else:\n raise RuntimeError('MagNet: Failed to load checkpoint file.')\n self.is_graph_built = True\n try:\n i = int(self.ckpt_name.split('-')[-1])\n print(\"Iteration number is {:d}\".format(i))\n vid_name = vid_name + '_' + str(i)\n except:\n print(\"Cannot get iteration number\")\n\n if len(filter_a) is not 0:\n x_state = []\n y_state = []\n\n for frame in tqdm(vid_frames, desc='Applying IIR'):\n file_name = os.path.basename(frame)\n frame_no, _ = os.path.splitext(file_name)\n frame_no = int(frame_no)\n in_frames = [load_train_data([frame, frame, frame],\n gray_scale=self.n_channels==1, is_testing=True)]\n in_frames = np.array(in_frames).astype(np.float32)\n\n texture_enc, x = self.sess.run([self.texture_enc, self.shape_rep],\n feed_dict={\n self.input_image:\n in_frames[:, :, :, :3],})\n x_state.insert(0, x)\n # set up initial condition.\n while len(x_state) < len(filter_b):\n x_state.insert(0, x)\n if len(x_state) > len(filter_b):\n x_state = x_state[:len(filter_b)]\n y = np.zeros_like(x)\n for i in range(len(x_state)):\n y += x_state[i] * filter_b[i]\n for i in range(len(y_state)):\n y -= y_state[i] * filter_a[i]\n # update y state\n y_state.insert(0, y)\n if len(y_state) > len(filter_a):\n y_state = y_state[:len(filter_a)]\n\n out_amp = self.sess.run(self.output_image,\n feed_dict={self.out_texture_enc:\n texture_enc,\n self.filtered_enc: y,\n self.ref_shape_enc: x,\n self.amplification_factor:\n [amplification_factor]})\n\n im_path = os.path.join(out_dir, file_name)\n out_amp = np.squeeze(out_amp)\n out_amp = (127.5*(out_amp+1)).astype('uint8')\n cv2.imwrite(im_path, cv2.cvtColor(out_amp,\n code=cv2.COLOR_RGB2BGR))\n else:\n # This does FIR in fourier domain. 
Equivalent to cyclic\n # convolution.\n x_state = None\n for i, frame in tqdm(enumerate(vid_frames),\n desc='Getting encoding'):\n file_name = os.path.basename(frame)\n in_frames = [load_train_data([frame, frame, frame],\n gray_scale=self.n_channels==1, is_testing=True)]\n in_frames = np.array(in_frames).astype(np.float32)\n\n texture_enc, x = self.sess.run([self.texture_enc, self.shape_rep],\n feed_dict={\n self.input_image:\n in_frames[:, :, :, :3],})\n if x_state is None:\n x_state = np.zeros(x.shape + (len(vid_frames),),\n dtype='float32')\n x_state[:, :, :, :, i] = x\n\n filter_fft = np.fft.fft(np.fft.ifftshift(filter_b),\n n=x_state.shape[-1])\n # Filtering\n for i in trange(x_state.shape[1], desc=\"Applying FIR filter\"):\n x_fft = np.fft.fft(x_state[:, i, :, :], axis=-1)\n x_fft *= filter_fft[np.newaxis, np.newaxis, np.newaxis, :]\n x_state[:, i, :, :] = np.fft.ifft(x_fft)\n\n for i, frame in tqdm(enumerate(vid_frames), desc='Decoding'):\n file_name = os.path.basename(frame)\n frame_no, _ = os.path.splitext(file_name)\n frame_no = int(frame_no)\n in_frames = [load_train_data([frame, frame, frame],\n gray_scale=self.n_channels==1, is_testing=True)]\n in_frames = np.array(in_frames).astype(np.float32)\n texture_enc, _ = self.sess.run([self.texture_enc, self.shape_rep],\n feed_dict={\n self.input_image:\n in_frames[:, :, :, :3],\n })\n out_amp = self.sess.run(self.output_image,\n feed_dict={self.out_texture_enc: texture_enc,\n self.filtered_enc: x_state[:, :, :, :, i],\n self.ref_shape_enc: x,\n self.amplification_factor: [amplification_factor]})\n\n im_path = os.path.join(out_dir, file_name)\n out_amp = np.squeeze(out_amp)\n out_amp = (127.5*(out_amp+1)).astype('uint8')\n cv2.imwrite(im_path, cv2.cvtColor(out_amp,\n code=cv2.COLOR_RGB2BGR))\n del x_state\n\n # Try to combine it into a video\n call([DEFAULT_VIDEO_CONVERTER, '-y', '-f', 'image2', '-r', '30', '-i',\n os.path.join(out_dir, '%06d.png'), '-c:v', 'libx264',\n os.path.join(out_dir, vid_name + '.mp4')]\n )","function_tokens":["def","run_temporal","(","self",",","checkpoint_dir",",","vid_dir",",","frame_ext",",","out_dir",",","amplification_factor",",","fl",",","fh",",","fs",",","n_filter_tap",",","filter_type",")",":","nyq","=","fs","\/","2.0","if","filter_type","==","'fir'",":","filter_b","=","firwin","(","n_filter_tap",",","[","fl",",","fh","]",",","nyq","=","nyq",",","pass_zero","=","False",")","filter_a","=","[","]","elif","filter_type","==","'butter'",":","filter_b",",","filter_a","=","butter","(","n_filter_tap",",","[","fl","\/","nyq",",","fh","\/","nyq","]",",","btype","=","'bandpass'",")","filter_a","=","filter_a","[","1",":","]","elif","filter_type","==","'differenceOfIIR'",":","# This is a copy of what Neal did. 
Number of taps are ignored.","# Treat fl and fh as rl and rh as in Wadhwa's code.","# Write down the difference of difference equation in Fourier","# domain to proof this:","filter_b","=","[","fh","-","fl",",","fl","-","fh","]","filter_a","=","[","-","1.0","*","(","2.0","-","fh","-","fl",")",",","(","1.0","-","fl",")","*","(","1.0","-","fh",")","]","else",":","raise","ValueError","(","'Filter type must be either '","'[\"fir\", \"butter\", \"differenceOfIIR\"] got '","+","filter_type",")","head",",","tail","=","os",".","path",".","split","(","out_dir",")","tail","=","tail","+","'_fl{}_fh{}_fs{}_n{}_{}'",".","format","(","fl",",","fh",",","fs",",","n_filter_tap",",","filter_type",")","out_dir","=","os",".","path",".","join","(","head",",","tail",")","vid_name","=","os",".","path",".","basename","(","out_dir",")","# make folder","mkdir","(","out_dir",")","vid_frames","=","sorted","(","glob","(","os",".","path",".","join","(","vid_dir",",","'*.'","+","frame_ext",")",")",")","first_frame","=","vid_frames","[","0","]","im","=","imread","(","first_frame",")","image_height",",","image_width","=","im",".","shape","if","not","self",".","is_graph_built",":","self",".","image_width","=","image_width","self",".","image_height","=","image_height","# Figure out image dimension","self",".","_build_IIR_filtering_graphs","(",")","ginit_op","=","tf",".","global_variables_initializer","(",")","linit_op","=","tf",".","local_variables_initializer","(",")","self",".","sess",".","run","(","[","ginit_op",",","linit_op","]",")","if","self",".","load","(","checkpoint_dir",")",":","print","(","\"[*] Load Success\"",")","else",":","raise","RuntimeError","(","'MagNet: Failed to load checkpoint file.'",")","self",".","is_graph_built","=","True","try",":","i","=","int","(","self",".","ckpt_name",".","split","(","'-'",")","[","-","1","]",")","print","(","\"Iteration number is {:d}\"",".","format","(","i",")",")","vid_name","=","vid_name","+","'_'","+","str","(","i",")","except",":","print","(","\"Cannot get iteration number\"",")","if","len","(","filter_a",")","is","not","0",":","x_state","=","[","]","y_state","=","[","]","for","frame","in","tqdm","(","vid_frames",",","desc","=","'Applying IIR'",")",":","file_name","=","os",".","path",".","basename","(","frame",")","frame_no",",","_","=","os",".","path",".","splitext","(","file_name",")","frame_no","=","int","(","frame_no",")","in_frames","=","[","load_train_data","(","[","frame",",","frame",",","frame","]",",","gray_scale","=","self",".","n_channels","==","1",",","is_testing","=","True",")","]","in_frames","=","np",".","array","(","in_frames",")",".","astype","(","np",".","float32",")","texture_enc",",","x","=","self",".","sess",".","run","(","[","self",".","texture_enc",",","self",".","shape_rep","]",",","feed_dict","=","{","self",".","input_image",":","in_frames","[",":",",",":",",",":",",",":","3","]",",","}",")","x_state",".","insert","(","0",",","x",")","# set up initial condition.","while","len","(","x_state",")","<","len","(","filter_b",")",":","x_state",".","insert","(","0",",","x",")","if","len","(","x_state",")",">","len","(","filter_b",")",":","x_state","=","x_state","[",":","len","(","filter_b",")","]","y","=","np",".","zeros_like","(","x",")","for","i","in","range","(","len","(","x_state",")",")",":","y","+=","x_state","[","i","]","*","filter_b","[","i","]","for","i","in","range","(","len","(","y_state",")",")",":","y","-=","y_state","[","i","]","*","filter_a","[","i","]","# update y 
state","y_state",".","insert","(","0",",","y",")","if","len","(","y_state",")",">","len","(","filter_a",")",":","y_state","=","y_state","[",":","len","(","filter_a",")","]","out_amp","=","self",".","sess",".","run","(","self",".","output_image",",","feed_dict","=","{","self",".","out_texture_enc",":","texture_enc",",","self",".","filtered_enc",":","y",",","self",".","ref_shape_enc",":","x",",","self",".","amplification_factor",":","[","amplification_factor","]","}",")","im_path","=","os",".","path",".","join","(","out_dir",",","file_name",")","out_amp","=","np",".","squeeze","(","out_amp",")","out_amp","=","(","127.5","*","(","out_amp","+","1",")",")",".","astype","(","'uint8'",")","cv2",".","imwrite","(","im_path",",","cv2",".","cvtColor","(","out_amp",",","code","=","cv2",".","COLOR_RGB2BGR",")",")","else",":","# This does FIR in fourier domain. Equivalent to cyclic","# convolution.","x_state","=","None","for","i",",","frame","in","tqdm","(","enumerate","(","vid_frames",")",",","desc","=","'Getting encoding'",")",":","file_name","=","os",".","path",".","basename","(","frame",")","in_frames","=","[","load_train_data","(","[","frame",",","frame",",","frame","]",",","gray_scale","=","self",".","n_channels","==","1",",","is_testing","=","True",")","]","in_frames","=","np",".","array","(","in_frames",")",".","astype","(","np",".","float32",")","texture_enc",",","x","=","self",".","sess",".","run","(","[","self",".","texture_enc",",","self",".","shape_rep","]",",","feed_dict","=","{","self",".","input_image",":","in_frames","[",":",",",":",",",":",",",":","3","]",",","}",")","if","x_state","is","None",":","x_state","=","np",".","zeros","(","x",".","shape","+","(","len","(","vid_frames",")",",",")",",","dtype","=","'float32'",")","x_state","[",":",",",":",",",":",",",":",",","i","]","=","x","filter_fft","=","np",".","fft",".","fft","(","np",".","fft",".","ifftshift","(","filter_b",")",",","n","=","x_state",".","shape","[","-","1","]",")","# Filtering","for","i","in","trange","(","x_state",".","shape","[","1","]",",","desc","=","\"Applying FIR 
filter\"",")",":","x_fft","=","np",".","fft",".","fft","(","x_state","[",":",",","i",",",":",",",":","]",",","axis","=","-","1",")","x_fft","*=","filter_fft","[","np",".","newaxis",",","np",".","newaxis",",","np",".","newaxis",",",":","]","x_state","[",":",",","i",",",":",",",":","]","=","np",".","fft",".","ifft","(","x_fft",")","for","i",",","frame","in","tqdm","(","enumerate","(","vid_frames",")",",","desc","=","'Decoding'",")",":","file_name","=","os",".","path",".","basename","(","frame",")","frame_no",",","_","=","os",".","path",".","splitext","(","file_name",")","frame_no","=","int","(","frame_no",")","in_frames","=","[","load_train_data","(","[","frame",",","frame",",","frame","]",",","gray_scale","=","self",".","n_channels","==","1",",","is_testing","=","True",")","]","in_frames","=","np",".","array","(","in_frames",")",".","astype","(","np",".","float32",")","texture_enc",",","_","=","self",".","sess",".","run","(","[","self",".","texture_enc",",","self",".","shape_rep","]",",","feed_dict","=","{","self",".","input_image",":","in_frames","[",":",",",":",",",":",",",":","3","]",",","}",")","out_amp","=","self",".","sess",".","run","(","self",".","output_image",",","feed_dict","=","{","self",".","out_texture_enc",":","texture_enc",",","self",".","filtered_enc",":","x_state","[",":",",",":",",",":",",",":",",","i","]",",","self",".","ref_shape_enc",":","x",",","self",".","amplification_factor",":","[","amplification_factor","]","}",")","im_path","=","os",".","path",".","join","(","out_dir",",","file_name",")","out_amp","=","np",".","squeeze","(","out_amp",")","out_amp","=","(","127.5","*","(","out_amp","+","1",")",")",".","astype","(","'uint8'",")","cv2",".","imwrite","(","im_path",",","cv2",".","cvtColor","(","out_amp",",","code","=","cv2",".","COLOR_RGB2BGR",")",")","del","x_state","# Try to combine it into a video","call","(","[","DEFAULT_VIDEO_CONVERTER",",","'-y'",",","'-f'",",","'image2'",",","'-r'",",","'30'",",","'-i'",",","os",".","path",".","join","(","out_dir",",","'%06d.png'",")",",","'-c:v'",",","'libx264'",",","os",".","path",".","join","(","out_dir",",","vid_name","+","'.mp4'",")","]",")"],"url":"https:\/\/github.com\/12dmodel\/deep_motion_mag\/blob\/485243bd7428d08059c313321b5e6ebfd7f61991\/magnet.py#L331-L512"}